This fixes frequent Trials Fusion crashes that occur when a controller is disconnected while other controllers remain connected.
-- v6: hidclass: Set Status for pending IRPs of removed devices to STATUS_DEVICE_NOT_CONNECTED. ntdll/tests: Test IOSB values of the cancel operation.
From: Matteo Bruni <mbruni@codeweavers.com>
--- dlls/ntdll/unix/file.c | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c index 5bed090a45b..c4883f3be39 100644 --- a/dlls/ntdll/unix/file.c +++ b/dlls/ntdll/unix/file.c @@ -6431,19 +6431,16 @@ NTSTATUS WINAPI NtFlushBuffersFileEx( HANDLE handle, ULONG flags, void *params, }
-/************************************************************************** - * NtCancelIoFile (NTDLL.@) - */ -NTSTATUS WINAPI NtCancelIoFile( HANDLE handle, IO_STATUS_BLOCK *io_status ) +static NTSTATUS cancel_io( HANDLE handle, IO_STATUS_BLOCK *io, IO_STATUS_BLOCK *io_status, + BOOL only_thread ) { unsigned int status;
- TRACE( "%p %p\n", handle, io_status ); - SERVER_START_REQ( cancel_async ) { req->handle = wine_server_obj_handle( handle ); - req->only_thread = TRUE; + req->iosb = wine_server_client_ptr( io ); + req->only_thread = only_thread; if (!(status = wine_server_call( req ))) { io_status->Status = status; @@ -6456,28 +6453,25 @@ NTSTATUS WINAPI NtCancelIoFile( HANDLE handle, IO_STATUS_BLOCK *io_status ) }
+/************************************************************************** + * NtCancelIoFile (NTDLL.@) + */ +NTSTATUS WINAPI NtCancelIoFile( HANDLE handle, IO_STATUS_BLOCK *io_status ) +{ + TRACE( "%p %p\n", handle, io_status ); + + return cancel_io( handle, NULL, io_status, TRUE ); +} + + /************************************************************************** * NtCancelIoFileEx (NTDLL.@) */ NTSTATUS WINAPI NtCancelIoFileEx( HANDLE handle, IO_STATUS_BLOCK *io, IO_STATUS_BLOCK *io_status ) { - unsigned int status; - TRACE( "%p %p %p\n", handle, io, io_status );
- SERVER_START_REQ( cancel_async ) - { - req->handle = wine_server_obj_handle( handle ); - req->iosb = wine_server_client_ptr( io ); - if (!(status = wine_server_call( req ))) - { - io_status->Status = status; - io_status->Information = 0; - } - } - SERVER_END_REQ; - - return status; + return cancel_io( handle, io, io_status, FALSE ); }
From: Rémi Bernon <rbernon@codeweavers.com>
--- server/async.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/server/async.c b/server/async.c index 1ed241dcb65..6ab9ede4a0d 100644 --- a/server/async.c +++ b/server/async.c @@ -575,6 +575,16 @@ int async_waiting( struct async_queue *queue ) return !async->terminated; }
+static struct async *find_async_from_user( struct process *process, client_ptr_t user ) +{ + struct async *async; + + LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) + if (async->data.user == user) return async; + + return NULL; +} + static int cancel_async( struct process *process, struct object *obj, struct thread *thread, client_ptr_t iosb ) { struct async *async; @@ -829,17 +839,10 @@ DECL_HANDLER(cancel_async) /* get async result from associated iosb */ DECL_HANDLER(get_async_result) { - struct iosb *iosb = NULL; struct async *async; + struct iosb *iosb;
- LIST_FOR_EACH_ENTRY( async, ¤t->process->asyncs, struct async, process_entry ) - if (async->data.user == req->user_arg) - { - iosb = async->iosb; - break; - } - - if (!iosb) + if (!(async = find_async_from_user( current->process, req->user_arg )) || !(iosb = async->iosb)) { set_error( STATUS_INVALID_PARAMETER ); return;
From: Rémi Bernon <rbernon@codeweavers.com>
--- server/async.c | 44 ++++++++++++++++++++++---------------------- server/file.h | 2 +- server/process.c | 4 +++- server/process.h | 1 + 4 files changed, 27 insertions(+), 24 deletions(-)
diff --git a/server/async.c b/server/async.c index 6ab9ede4a0d..20f906c5d78 100644 --- a/server/async.c +++ b/server/async.c @@ -54,7 +54,6 @@ struct async unsigned int direct_result :1;/* a flag if we're passing result directly from request instead of APC */ unsigned int alerted :1; /* fd is signaled, but we are waiting for client-side I/O */ unsigned int terminated :1; /* async has been terminated */ - unsigned int canceled :1; /* have we already queued cancellation for this async? */ unsigned int unknown_status :1; /* initial status is not known yet */ unsigned int blocking :1; /* async is blocking */ unsigned int is_system :1; /* background system operation not affecting userspace visible state. */ @@ -276,7 +275,6 @@ struct async *create_async( struct fd *fd, struct thread *thread, const struct a async->direct_result = 0; async->alerted = 0; async->terminated = 0; - async->canceled = 0; async->unknown_status = 0; async->blocking = !is_fd_overlapped( fd ); async->is_system = 0; @@ -575,17 +573,26 @@ int async_waiting( struct async_queue *queue ) return !async->terminated; }
+static void cancel_async( struct process *process, struct async *async ) +{ + list_remove( &async->process_entry ); + list_add_tail( &process->cancelled_asyncs, &async->process_entry ); + fd_cancel_async( async->fd, async ); +} + static struct async *find_async_from_user( struct process *process, client_ptr_t user ) { struct async *async;
+ LIST_FOR_EACH_ENTRY( async, &process->cancelled_asyncs, struct async, process_entry ) + if (async->data.user == user) return async; LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) if (async->data.user == user) return async;
return NULL; }
-static int cancel_async( struct process *process, struct object *obj, struct thread *thread, client_ptr_t iosb ) +static int cancel_process_async( struct process *process, struct object *obj, struct thread *thread, client_ptr_t iosb ) { struct async *async; int woken = 0; @@ -597,13 +604,12 @@ static int cancel_async( struct process *process, struct object *obj, struct thr restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) { - if (async->terminated || async->canceled || async->is_system) continue; + if (async->terminated || async->is_system) continue; if ((!obj || (get_fd_user( async->fd ) == obj)) && (!thread || async->thread == thread) && (!iosb || async->data.iosb == iosb)) { - async->canceled = 1; - fd_cancel_async( async->fd, async ); + cancel_async( process, async ); woken++; goto restart; } @@ -619,12 +625,11 @@ static int cancel_blocking( struct process *process, struct thread *thread, clie restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) { - if (async->terminated || async->canceled) continue; + if (async->terminated) continue; if (async->blocking && async->thread == thread && (!iosb || async->data.iosb == iosb)) { - async->canceled = 1; - fd_cancel_async( async->fd, async ); + cancel_async( process, async ); woken++; goto restart; } @@ -632,16 +637,15 @@ restart: return woken; }
-void cancel_process_asyncs( struct process *process ) +void cancel_terminating_process_asyncs( struct process *process ) { struct async *async;
restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) { - if (async->terminated || async->canceled) continue; - async->canceled = 1; - fd_cancel_async( async->fd, async ); + if (async->terminated) continue; + cancel_async( process, async ); goto restart; } } @@ -658,11 +662,9 @@ int async_close_obj_handle( struct object *obj, struct process *process, obj_han restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) { - if (async->terminated || async->canceled || get_fd_user( async->fd ) != obj) continue; + if (async->terminated || get_fd_user( async->fd ) != obj) continue; if (!async->completion || !async->data.apc_context || async->event) continue; - - async->canceled = 1; - fd_cancel_async( async->fd, async ); + cancel_async( process, async ); goto restart; } return 1; @@ -675,12 +677,10 @@ void cancel_terminating_thread_asyncs( struct thread *thread ) restart: LIST_FOR_EACH_ENTRY( async, &thread->process->asyncs, struct async, process_entry ) { - if (async->thread != thread || async->terminated || async->canceled) continue; + if (async->thread != thread || async->terminated) continue; if (async->completion && async->data.apc_context && !async->event) continue; if (async->is_system) continue; - - async->canceled = 1; - fd_cancel_async( async->fd, async ); + cancel_async( thread->process, async ); goto restart; } } @@ -830,7 +830,7 @@ DECL_HANDLER(cancel_async)
if (obj) { - int count = cancel_async( current->process, obj, thread, req->iosb ); + int count = cancel_process_async( current->process, obj, thread, req->iosb ); if (!count && !thread) set_error( STATUS_NOT_FOUND ); release_object( obj ); } diff --git a/server/file.h b/server/file.h index 567194bf00a..7f2d0185bd4 100644 --- a/server/file.h +++ b/server/file.h @@ -278,7 +278,7 @@ extern void fd_copy_completion( struct fd *src, struct fd *dst ); extern struct iosb *async_get_iosb( struct async *async ); extern struct thread *async_get_thread( struct async *async ); extern struct async *find_pending_async( struct async_queue *queue ); -extern void cancel_process_asyncs( struct process *process ); +extern void cancel_terminating_process_asyncs( struct process *process ); extern void cancel_terminating_thread_asyncs( struct thread *thread ); extern int async_close_obj_handle( struct object *obj, struct process *process, obj_handle_t handle );
diff --git a/server/process.c b/server/process.c index 9b3b0b8fc6c..268744ef2cf 100644 --- a/server/process.c +++ b/server/process.c @@ -705,6 +705,7 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla list_init( &process->thread_list ); list_init( &process->locks ); list_init( &process->asyncs ); + list_init( &process->cancelled_asyncs ); list_init( &process->classes ); list_init( &process->views );
@@ -780,6 +781,7 @@ static void process_destroy( struct object *obj ) /* we can't have a thread remaining */ assert( list_empty( &process->thread_list )); assert( list_empty( &process->asyncs )); + assert( list_empty( &process->cancelled_asyncs ));
assert( !process->sigkill_timeout ); /* timeout should hold a reference to the process */
@@ -986,7 +988,7 @@ static void process_killed( struct process *process ) close_process_desktop( process ); process->winstation = 0; process->desktop = 0; - cancel_process_asyncs( process ); + cancel_terminating_process_asyncs( process ); close_process_handles( process ); if (process->idle_event) release_object( process->idle_event ); process->idle_event = NULL; diff --git a/server/process.h b/server/process.h index d9b78004afa..0e01fa9eafb 100644 --- a/server/process.h +++ b/server/process.h @@ -67,6 +67,7 @@ struct process struct job *job; /* job object associated with this process */ struct list job_entry; /* list entry for job object */ struct list asyncs; /* list of async object owned by the process */ + struct list cancelled_asyncs;/* list of async object owned by the process */ struct list locks; /* list of file locks owned by the process */ struct list classes; /* window classes owned by the process */ struct console *console; /* console input */
From: Matteo Bruni <mbruni@codeweavers.com>
I should split out a separate patch (before this one) like: server: Introduce a separate server object for the async cancel operation. --- dlls/ntdll/unix/file.c | 16 +- dlls/ntdll/unix/server.c | 21 ++- include/wine/server_protocol.h | 20 ++- server/async.c | 257 ++++++++++++++++++++++++++++++--- server/process.c | 4 +- server/process.h | 2 +- server/protocol.def | 8 + server/request_handlers.h | 6 + server/request_trace.h | 13 ++ 9 files changed, 319 insertions(+), 28 deletions(-)
diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c index c4883f3be39..dec5442cedd 100644 --- a/dlls/ntdll/unix/file.c +++ b/dlls/ntdll/unix/file.c @@ -5131,6 +5131,8 @@ static BOOL irp_completion( void *user, ULONG_PTR *info, unsigned int *status ) { struct async_irp *async = user;
+ TRACE( "user %p, info %p, *status %#x\n", user, info, *status ); + if (*status == STATUS_ALERTED) { SERVER_START_REQ( get_async_result ) @@ -6434,6 +6436,7 @@ NTSTATUS WINAPI NtFlushBuffersFileEx( HANDLE handle, ULONG flags, void *params, static NTSTATUS cancel_io( HANDLE handle, IO_STATUS_BLOCK *io, IO_STATUS_BLOCK *io_status, BOOL only_thread ) { + HANDLE cancel_handle; unsigned int status;
SERVER_START_REQ( cancel_async ) @@ -6442,13 +6445,18 @@ static NTSTATUS cancel_io( HANDLE handle, IO_STATUS_BLOCK *io, IO_STATUS_BLOCK * req->iosb = wine_server_client_ptr( io ); req->only_thread = only_thread; if (!(status = wine_server_call( req ))) - { - io_status->Status = status; - io_status->Information = 0; - } + cancel_handle = wine_server_ptr_handle( reply->cancel_handle ); } SERVER_END_REQ;
+ if (!status && cancel_handle) NtWaitForMultipleObjects( 1, &cancel_handle, FALSE, TRUE, NULL ); + + /* TODO: Add a test for this? It's tricky since we probably have to + * check from a different thread without using win32 IPC for + * synchronization to avoid syncing on system APC execution. */ + io_status->Status = status; + io_status->Information = 0; + return status; }
diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c index 3969c2bb898..6c14ba48c63 100644 --- a/dlls/ntdll/unix/server.c +++ b/dlls/ntdll/unix/server.c @@ -418,8 +418,27 @@ static void invoke_system_apc( const union apc_call *call, union apc_result *res result->async_io.total = info; /* the server will pass us NULL if a call failed synchronously */ set_async_iosb( call->async_io.sb, result->async_io.status, info ); + //TRACE("call->async_io.sb %#llx, status %#x, result->async_io.status %#x\n", call->async_io.sb, status, result->async_io.status); + /* if ( call->async_io.sb && !in_wow64_call()) */ + /* { */ + /* IO_STATUS_BLOCK *io = wine_server_get_ptr( call->async_io.sb ); */ + /* TRACE( "&io->Status %p, io->Status %#x\n", &io->Status, io->Status ); */ + /* } */ + if (status == STATUS_CANCELLED) + { + SERVER_START_REQ( notify_cancelled_async ) + { + req->user_arg = wine_server_client_ptr( user ); + wine_server_call( req ); + } + SERVER_END_REQ; + } + } + else + { + TRACE("callback returned 0\n"); + result->async_io.status = STATUS_PENDING; /* restart it */ } - else result->async_io.status = STATUS_PENDING; /* restart it */ break; } case APC_VIRTUAL_ALLOC: diff --git a/include/wine/server_protocol.h b/include/wine/server_protocol.h index 1f9348e9e86..665f091be61 100644 --- a/include/wine/server_protocol.h +++ b/include/wine/server_protocol.h @@ -3278,6 +3278,21 @@ struct cancel_async_request char __pad_28[4]; }; struct cancel_async_reply +{ + struct reply_header __header; + obj_handle_t cancel_handle; + char __pad_12[4]; +}; + + + +struct notify_cancelled_async_request +{ + struct request_header __header; + char __pad_12[4]; + client_ptr_t user_arg; +}; +struct notify_cancelled_async_reply { struct reply_header __header; }; @@ -6079,6 +6094,7 @@ enum request REQ_cancel_sync, REQ_register_async, REQ_cancel_async, + REQ_notify_cancelled_async, REQ_get_async_result, REQ_set_async_direct_result, REQ_read, @@ -6382,6 +6398,7 @@ union generic_request struct 
cancel_sync_request cancel_sync_request; struct register_async_request register_async_request; struct cancel_async_request cancel_async_request; + struct notify_cancelled_async_request notify_cancelled_async_request; struct get_async_result_request get_async_result_request; struct set_async_direct_result_request set_async_direct_result_request; struct read_request read_request; @@ -6683,6 +6700,7 @@ union generic_reply struct cancel_sync_reply cancel_sync_reply; struct register_async_reply register_async_reply; struct cancel_async_reply cancel_async_reply; + struct notify_cancelled_async_reply notify_cancelled_async_reply; struct get_async_result_reply get_async_result_reply; struct set_async_direct_result_reply set_async_direct_result_reply; struct read_reply read_reply; @@ -6846,6 +6864,6 @@ union generic_reply struct set_keyboard_repeat_reply set_keyboard_repeat_reply; };
-#define SERVER_PROTOCOL_VERSION 880 +#define SERVER_PROTOCOL_VERSION 881
#endif /* __WINE_WINE_SERVER_PROTOCOL_H */ diff --git a/server/async.c b/server/async.c index 20f906c5d78..1b00e2fb6fe 100644 --- a/server/async.c +++ b/server/async.c @@ -62,6 +62,8 @@ struct async unsigned int comp_flags; /* completion flags */ async_completion_callback completion_callback; /* callback to be called on completion */ void *completion_callback_private; /* argument to completion_callback */ + + struct async_cancel *async_cancel; };
static void async_dump( struct object *obj, int verbose ); @@ -145,6 +147,13 @@ static void async_destroy( struct object *obj ) struct async *async = (struct async *)obj; assert( obj->ops == &async_ops );
+ if (async->async_cancel) + { + fprintf( stderr, "unexpected non-NULL async_cancel %p!\n", async->async_cancel ); + release_object( async->fd ); + async->async_cancel = NULL; + } + list_remove( &async->process_entry );
if (async->queue) @@ -161,6 +170,8 @@ static void async_destroy( struct object *obj ) release_object( async->thread ); }
+static void async_complete_cancel( struct async *async ); + /* notifies client thread of new status of its async request */ void async_terminate( struct async *async, unsigned int status ) { @@ -282,6 +293,7 @@ struct async *create_async( struct fd *fd, struct thread *thread, const struct a async->comp_flags = 0; async->completion_callback = NULL; async->completion_callback_private = NULL; + async->async_cancel = NULL;
if (iosb) async->iosb = (struct iosb *)grab_object( iosb ); else async->iosb = NULL; @@ -573,33 +585,141 @@ int async_waiting( struct async_queue *queue ) return !async->terminated; }
-static void cancel_async( struct process *process, struct async *async ) +struct async_cancel +{ + struct object obj; /* object header */ + struct process *process; + struct thread *thread; + struct list asyncs; + obj_handle_t wait_handle; + unsigned int count; + struct list async_cancels_entry; +}; + +static void async_cancel_dump( struct object *obj, int verbose ); +static int async_cancel_signaled( struct object *obj, struct wait_queue_entry *entry ); +static void async_cancel_satisfied( struct object * obj, struct wait_queue_entry *entry ); +static void async_cancel_destroy( struct object *obj ); + +static const struct object_ops async_cancel_ops = +{ + sizeof(struct async_cancel), /* size */ + &no_type, /* type */ + async_cancel_dump, /* dump */ + add_queue, /* add_queue */ + remove_queue, /* remove_queue */ + async_cancel_signaled, /* signaled */ + async_cancel_satisfied, /* satisfied */ + no_signal, /* signal */ + no_get_fd, /* get_fd */ + default_get_sync, /* get_sync */ + default_map_access, /* map_access */ + default_get_sd, /* get_sd */ + default_set_sd, /* set_sd */ + no_get_full_name, /* get_full_name */ + no_lookup_name, /* lookup_name */ + no_link_name, /* link_name */ + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ + no_close_handle, /* close_handle */ + async_cancel_destroy /* destroy */ +}; + +static void async_cancel_dump( struct object *obj, int verbose ) +{ + struct async_cancel *cancel = (struct async_cancel *)obj; + assert( obj->ops == &async_cancel_ops ); + fprintf( stderr, "async_cancel process=%p, thread=%p, wait_handle %u, count %u\n", + cancel->process, cancel->thread, cancel->wait_handle, cancel->count ); +} + +static int async_cancel_signaled( struct object *obj, struct wait_queue_entry *entry ) { + struct async_cancel *cancel = (struct async_cancel *)obj; + + assert( obj->ops == &async_cancel_ops ); + return !cancel->count; +} + +static void async_cancel_satisfied( struct 
object *obj, struct wait_queue_entry *entry ) +{ + //struct async *async = (struct async *)obj; + assert( obj->ops == &async_cancel_ops ); + + //fprintf( stderr, "async_cancel_satisfied\n" ); +} + +static void async_cancel_destroy( struct object *obj ) +{ + struct async_cancel *cancel = (struct async_cancel *)obj; + + //fprintf( stderr, "async_cancel_destroy cancel %p\n", cancel ); + + assert( obj->ops == &async_cancel_ops ); + list_remove( &cancel->async_cancels_entry ); + if (cancel->thread) + release_object( cancel->thread ); +} + +static struct async_cancel *create_async_cancel( struct process *process, struct thread *thread ) +{ + struct async_cancel *cancel; + + if (!(cancel = alloc_object( &async_cancel_ops ))) + { + return NULL; + } + //fprintf( stderr, "new async_cancel %p\n", cancel ); + //FIXME: We should probably also grab_object() + release_object() the process + cancel->process = process; + cancel->thread = thread ? (struct thread *)grab_object( thread ) : 0; + list_init(&cancel->asyncs); + cancel->wait_handle = 0; + cancel->count = 0; + list_add_tail( &process->async_cancels, &cancel->async_cancels_entry ); + return cancel; +} + +static void cancel_async( struct async_cancel *cancel, struct async *async ) +{ + /* fprintf( stderr, "cancel_async cancel %p, async %p, async->direct_result %u, async->iosb->status %#x\n", */ + /* cancel, async, async->direct_result, async->iosb ? async->iosb->status : 0xffffffff ); */ list_remove( &async->process_entry ); - list_add_tail( &process->cancelled_asyncs, &async->process_entry ); + list_add_tail( &cancel->asyncs, &async->process_entry ); + grab_object( async ); + grab_object( cancel ); + async->async_cancel = cancel; + cancel->count++; + //fprintf( stderr, "cancel %p, count %u\n", cancel, cancel->count ); fd_cancel_async( async->fd, async ); }
static struct async *find_async_from_user( struct process *process, client_ptr_t user ) { + struct async_cancel *cancel; struct async *async;
- LIST_FOR_EACH_ENTRY( async, &process->cancelled_asyncs, struct async, process_entry ) - if (async->data.user == user) return async; + LIST_FOR_EACH_ENTRY( cancel, &process->async_cancels, struct async_cancel, async_cancels_entry ) + LIST_FOR_EACH_ENTRY( async, &cancel->asyncs, struct async, process_entry ) + if (async->data.user == user) return async; LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) if (async->data.user == user) return async;
return NULL; }
-static int cancel_process_async( struct process *process, struct object *obj, struct thread *thread, client_ptr_t iosb ) +static struct async_cancel *cancel_process_async( struct process *process, struct object *obj, struct thread *thread, + client_ptr_t iosb ) { + struct async_cancel *cancel; struct async *async; int woken = 0;
- /* FIXME: it would probably be nice to replace the "canceled" flag with a - * single LIST_FOR_EACH_ENTRY_SAFE, but currently cancelling an async can - * cause other asyncs to be removed via async_reselect() */ + //struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *user, unsigned int options ); + cancel = create_async_cancel( process, thread ); + if (!cancel) + return 0;
restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) @@ -609,19 +729,52 @@ restart: (!thread || async->thread == thread) && (!iosb || async->data.iosb == iosb)) { - cancel_async( process, async ); + cancel_async( cancel, async ); woken++; goto restart; } } - return woken; + if (cancel->count) + cancel->wait_handle = alloc_handle( process, cancel, SYNCHRONIZE, 0 ); + //fprintf( stderr, "woken %u, wait_handle %#x\n", woken, cancel->wait_handle ); + release_object( cancel ); + return woken ? cancel : NULL; +} + +static void async_complete_cancel( struct async *async ) +{ + struct async_cancel *cancel; + //struct async *async; + + //fprintf( stderr, "async_complete_cancel async %p async->wait_handle %u\n", async, async->wait_handle ); + if ((cancel = async->async_cancel)) + { + //fprintf( stderr, "cancel %p, count %u\n", cancel, cancel->count ); + if (!--cancel->count) + { + //fprintf( stderr, "cancel->count == 0\n" ); + if (cancel->wait_handle) + { + //fprintf( stderr, "wake_up()\n" ); + wake_up( &cancel->obj, 0 ); + } + } + release_object( cancel ); + async->async_cancel = NULL; + release_object( async ); + } }
static int cancel_blocking( struct process *process, struct thread *thread, client_ptr_t iosb ) { + struct async_cancel *cancel; struct async *async; int woken = 0;
+ cancel = create_async_cancel( process, thread ); + if (!cancel) + return 0; + restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) { @@ -629,25 +782,45 @@ restart: if (async->blocking && async->thread == thread && (!iosb || async->data.iosb == iosb)) { - cancel_async( process, async ); + cancel_async( cancel, async ); woken++; goto restart; } } + + /* There's no waiting for blocking asyncs */ + cancel->wait_handle = 0; + release_object( cancel ); return woken; }
void cancel_terminating_process_asyncs( struct process *process ) { - struct async *async; + struct async_cancel *cancel, *next_cancel; + struct async *async, *next_async; + + //fprintf( stderr, "cancel_terminating_process_asyncs process %p\n", process );
restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) { if (async->terminated) continue; - cancel_async( process, async ); + fd_cancel_async( async->fd, async ); goto restart; } + + LIST_FOR_EACH_ENTRY_SAFE( cancel, next_cancel, &process->async_cancels, struct async_cancel, async_cancels_entry ) + { + if (cancel->count) + fprintf( stderr, "cancel_terminating_process_asyncs cancel %p count %u\n", cancel, cancel->count ); + LIST_FOR_EACH_ENTRY_SAFE( async, next_async, &cancel->asyncs, struct async, process_entry ) + { + fprintf( stderr, "async %p didn't complete before process termination\n", async ); + async->async_cancel = NULL; + release_object( cancel ); + release_object( async ); + } + } }
int async_close_obj_handle( struct object *obj, struct process *process, obj_handle_t handle ) @@ -655,34 +828,50 @@ int async_close_obj_handle( struct object *obj, struct process *process, obj_han /* Handle a special case when the last object handle in the given process is closed. * If this is the last object handle overall that is handled in object's close_handle and * destruction. */ + struct async_cancel *cancel; struct async *async;
if (obj->handle_count == 1 || get_obj_handle_count( process, obj ) != 1) return 1;
+ cancel = create_async_cancel( process, NULL ); + if (!cancel) + return 0; + restart: LIST_FOR_EACH_ENTRY( async, &process->asyncs, struct async, process_entry ) { if (async->terminated || get_fd_user( async->fd ) != obj) continue; if (!async->completion || !async->data.apc_context || async->event) continue; - cancel_async( process, async ); + cancel_async( cancel, async ); goto restart; } + cancel->wait_handle = 0; + release_object( cancel ); return 1; }
void cancel_terminating_thread_asyncs( struct thread *thread ) { + struct async_cancel *cancel; struct async *async;
+ //fprintf( stderr, "cancel_terminating_thread_asyncs thread %p\n", thread ); + + cancel = create_async_cancel( thread->process, thread ); + if (!cancel) + return; + restart: LIST_FOR_EACH_ENTRY( async, &thread->process->asyncs, struct async, process_entry ) { if (async->thread != thread || async->terminated) continue; if (async->completion && async->data.apc_context && !async->event) continue; if (async->is_system) continue; - cancel_async( thread->process, async ); + cancel_async( cancel, async ); goto restart; } + cancel->wait_handle = 0; + release_object( cancel ); }
/* wake up async operations on the queue */ @@ -827,13 +1016,40 @@ DECL_HANDLER(cancel_async) { struct object *obj = get_handle_obj( current->process, req->handle, 0, NULL ); struct thread *thread = req->only_thread ? current : NULL; + struct async_cancel *cancel; + + if (!obj) + return; + + cancel = cancel_process_async( current->process, obj, thread, req->iosb ); + if (!cancel && !thread) set_error( STATUS_NOT_FOUND ); + release_object( obj ); + if (cancel) + { + if (cancel->count) + reply->cancel_handle = cancel->wait_handle; + else + { + fprintf( stderr, "unexpected cancel->count == 0\n" ); + close_handle( current->process, cancel->wait_handle ); + cancel->wait_handle = 0; + } + } +} + +/* Notify of the completion of a cancelled async */ +DECL_HANDLER(notify_cancelled_async) +{ + struct async *async;
- if (obj) + /* TODO: Only look into the cancelled async lists? */ + if (!(async = find_async_from_user( current->process, req->user_arg ))) { - int count = cancel_process_async( current->process, obj, thread, req->iosb ); - if (!count && !thread) set_error( STATUS_NOT_FOUND ); - release_object( obj ); + set_error( STATUS_INVALID_PARAMETER ); + return; } + + async_complete_cancel( async ); }
/* get async result from associated iosb */ @@ -858,6 +1074,7 @@ DECL_HANDLER(get_async_result) } } set_error( iosb->status ); + async_complete_cancel( async ); }
/* notify direct completion of async and close the wait handle if not blocking */ @@ -907,5 +1124,7 @@ DECL_HANDLER(set_async_direct_result) */ reply->handle = async->wait_handle;
+ async_complete_cancel( async ); + release_object( &async->obj ); } diff --git a/server/process.c b/server/process.c index 268744ef2cf..722195ecc2f 100644 --- a/server/process.c +++ b/server/process.c @@ -705,7 +705,7 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla list_init( &process->thread_list ); list_init( &process->locks ); list_init( &process->asyncs ); - list_init( &process->cancelled_asyncs ); + list_init( &process->async_cancels ); list_init( &process->classes ); list_init( &process->views );
@@ -781,7 +781,7 @@ static void process_destroy( struct object *obj ) /* we can't have a thread remaining */ assert( list_empty( &process->thread_list )); assert( list_empty( &process->asyncs )); - assert( list_empty( &process->cancelled_asyncs )); + assert( list_empty( &process->async_cancels ));
assert( !process->sigkill_timeout ); /* timeout should hold a reference to the process */
diff --git a/server/process.h b/server/process.h index 0e01fa9eafb..1de426eed53 100644 --- a/server/process.h +++ b/server/process.h @@ -67,7 +67,7 @@ struct process struct job *job; /* job object associated with this process */ struct list job_entry; /* list entry for job object */ struct list asyncs; /* list of async object owned by the process */ - struct list cancelled_asyncs;/* list of async object owned by the process */ + struct list async_cancels; /* list of async_cancel objects */ struct list locks; /* list of file locks owned by the process */ struct list classes; /* window classes owned by the process */ struct console *console; /* console input */ diff --git a/server/protocol.def b/server/protocol.def index 22470e33ae0..b171739230f 100644 --- a/server/protocol.def +++ b/server/protocol.def @@ -2461,6 +2461,14 @@ enum message_type obj_handle_t handle; /* handle to comm port, socket or file */ client_ptr_t iosb; /* I/O status block (NULL=all) */ int only_thread; /* cancel matching this thread */ +@REPLY + obj_handle_t cancel_handle; +@END + + +/* Notify of the completion of a cancelled async */ +@REQ(notify_cancelled_async) + client_ptr_t user_arg; /* user arg used to identify async */ @END
diff --git a/server/request_handlers.h b/server/request_handlers.h index f9e5423ff40..54949e8bc32 100644 --- a/server/request_handlers.h +++ b/server/request_handlers.h @@ -142,6 +142,7 @@ DECL_HANDLER(set_serial_info); DECL_HANDLER(cancel_sync); DECL_HANDLER(register_async); DECL_HANDLER(cancel_async); +DECL_HANDLER(notify_cancelled_async); DECL_HANDLER(get_async_result); DECL_HANDLER(set_async_direct_result); DECL_HANDLER(read); @@ -442,6 +443,7 @@ static const req_handler req_handlers[REQ_NB_REQUESTS] = (req_handler)req_cancel_sync, (req_handler)req_register_async, (req_handler)req_cancel_async, + (req_handler)req_notify_cancelled_async, (req_handler)req_get_async_result, (req_handler)req_set_async_direct_result, (req_handler)req_read, @@ -1400,6 +1402,10 @@ C_ASSERT( offsetof(struct cancel_async_request, handle) == 12 ); C_ASSERT( offsetof(struct cancel_async_request, iosb) == 16 ); C_ASSERT( offsetof(struct cancel_async_request, only_thread) == 24 ); C_ASSERT( sizeof(struct cancel_async_request) == 32 ); +C_ASSERT( offsetof(struct cancel_async_reply, cancel_handle) == 8 ); +C_ASSERT( sizeof(struct cancel_async_reply) == 16 ); +C_ASSERT( offsetof(struct notify_cancelled_async_request, user_arg) == 16 ); +C_ASSERT( sizeof(struct notify_cancelled_async_request) == 24 ); C_ASSERT( offsetof(struct get_async_result_request, user_arg) == 16 ); C_ASSERT( sizeof(struct get_async_result_request) == 24 ); C_ASSERT( sizeof(struct get_async_result_reply) == 8 ); diff --git a/server/request_trace.h b/server/request_trace.h index 8824eab2d09..471283be528 100644 --- a/server/request_trace.h +++ b/server/request_trace.h @@ -1574,6 +1574,16 @@ static void dump_cancel_async_request( const struct cancel_async_request *req ) fprintf( stderr, ", only_thread=%d", req->only_thread ); }
+static void dump_cancel_async_reply( const struct cancel_async_reply *req ) +{ + fprintf( stderr, " cancel_handle=%04x", req->cancel_handle ); +} + +static void dump_notify_cancelled_async_request( const struct notify_cancelled_async_request *req ) +{ + dump_uint64( " user_arg=", &req->user_arg ); +} + static void dump_get_async_result_request( const struct get_async_result_request *req ) { dump_uint64( " user_arg=", &req->user_arg ); @@ -3517,6 +3527,7 @@ static const dump_func req_dumpers[REQ_NB_REQUESTS] = (dump_func)dump_cancel_sync_request, (dump_func)dump_register_async_request, (dump_func)dump_cancel_async_request, + (dump_func)dump_notify_cancelled_async_request, (dump_func)dump_get_async_result_request, (dump_func)dump_set_async_direct_result_request, (dump_func)dump_read_request, @@ -3816,6 +3827,7 @@ static const dump_func reply_dumpers[REQ_NB_REQUESTS] = NULL, NULL, NULL, + (dump_func)dump_cancel_async_reply, NULL, (dump_func)dump_get_async_result_reply, (dump_func)dump_set_async_direct_result_reply, @@ -4117,6 +4129,7 @@ static const char * const req_names[REQ_NB_REQUESTS] = "cancel_sync", "register_async", "cancel_async", + "notify_cancelled_async", "get_async_result", "set_async_direct_result", "read",
From: Matteo Bruni mbruni@codeweavers.com
--- dlls/ntdll/tests/pipe.c | 63 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 7 deletions(-)
diff --git a/dlls/ntdll/tests/pipe.c b/dlls/ntdll/tests/pipe.c index 14a6148a2c5..61d1aba1edf 100644 --- a/dlls/ntdll/tests/pipe.c +++ b/dlls/ntdll/tests/pipe.c @@ -569,13 +569,36 @@ static void test_nonalertable(void) CloseHandle(hPipe); }
-static void test_cancelio(void) +struct cancelio_ctx { - IO_STATUS_BLOCK iosb; + HANDLE pipe; + HANDLE event; + IO_STATUS_BLOCK *iosb; + BOOL null_iosb; +}; + +static DWORD WINAPI cancelioex_thread_func(void *arg) +{ + struct cancelio_ctx *ctx = arg; IO_STATUS_BLOCK cancel_sb; - HANDLE hEvent; - HANDLE hPipe; - NTSTATUS res; + NTSTATUS res, status; + + res = pNtCancelIoFileEx(ctx->pipe, ctx->null_iosb ? NULL : ctx->iosb, &cancel_sb); + status = ctx->iosb->Status; + ok(!res, "NtCancelIoFileEx returned %lx\n", res); + + ok(status == STATUS_CANCELLED, "Wrong iostatus %lx\n", status); + ok(WaitForSingleObject(ctx->event, 0) == 0, "hEvent not signaled\n"); + + return 0; +} + +static void test_cancelio(void) +{ + IO_STATUS_BLOCK cancel_sb, iosb; + HANDLE hEvent, hPipe, thread; + struct cancelio_ctx ctx; + NTSTATUS res, status;
hEvent = CreateEventW(NULL, TRUE, FALSE, NULL); ok(hEvent != INVALID_HANDLE_VALUE, "can't create event, GetLastError: %lx\n", GetLastError()); @@ -589,9 +612,13 @@ static void test_cancelio(void) ok(res == STATUS_PENDING, "NtFsControlFile returned %lx\n", res);
res = pNtCancelIoFile(hPipe, &cancel_sb); + /* Save Status first thing after the call. We want to make very unlikely + * that the kernel APC updating Status could be executed after the call + * but before peeking at Status here. */ + status = iosb.Status; ok(!res, "NtCancelIoFile returned %lx\n", res);
- ok(iosb.Status == STATUS_CANCELLED, "Wrong iostatus %lx\n", iosb.Status); + ok(status == STATUS_CANCELLED, "Wrong iostatus %lx\n", status); ok(WaitForSingleObject(hEvent, 0) == 0, "hEvent not signaled\n");
ok(!ioapc_called, "IOAPC ran too early\n"); @@ -616,9 +643,10 @@ static void test_cancelio(void) ok(res == STATUS_PENDING, "NtFsControlFile returned %lx\n", res);
res = pNtCancelIoFileEx(hPipe, &iosb, &cancel_sb); + status = iosb.Status; ok(!res, "NtCancelIoFileEx returned %lx\n", res);
- ok(iosb.Status == STATUS_CANCELLED, "Wrong iostatus %lx\n", iosb.Status); + ok(status == STATUS_CANCELLED, "Wrong iostatus %lx\n", status); ok(WaitForSingleObject(hEvent, 0) == 0, "hEvent not signaled\n");
iosb.Status = 0xdeadbeef; @@ -626,6 +654,27 @@ static void test_cancelio(void) ok(res == STATUS_NOT_FOUND, "NtCancelIoFileEx returned %lx\n", res); ok(iosb.Status == 0xdeadbeef, "Wrong iostatus %lx\n", iosb.Status);
+ memset(&iosb, 0x55, sizeof(iosb)); + res = listen_pipe(hPipe, hEvent, &iosb, FALSE); + ok(res == STATUS_PENDING, "NtFsControlFile returned %lx\n", res); + + ctx.pipe = hPipe; + ctx.event = hEvent; + ctx.iosb = &iosb; + ctx.null_iosb = FALSE; + thread = CreateThread(NULL, 0, cancelioex_thread_func, &ctx, 0, NULL); + WaitForSingleObject(thread, INFINITE); + CloseHandle(thread); + + memset(&iosb, 0x55, sizeof(iosb)); + res = listen_pipe(hPipe, hEvent, &iosb, FALSE); + ok(res == STATUS_PENDING, "NtFsControlFile returned %lx\n", res); + + ctx.null_iosb = TRUE; + thread = CreateThread(NULL, 0, cancelioex_thread_func, &ctx, 0, NULL); + WaitForSingleObject(thread, INFINITE); + CloseHandle(thread); + CloseHandle(hPipe); } else
From: Matteo Bruni mbruni@codeweavers.com
--- dlls/ntdll/tests/pipe.c | 12 ++++++++++++ 1 file changed, 12 insertions(+)
diff --git a/dlls/ntdll/tests/pipe.c b/dlls/ntdll/tests/pipe.c index 61d1aba1edf..500feb02820 100644 --- a/dlls/ntdll/tests/pipe.c +++ b/dlls/ntdll/tests/pipe.c @@ -589,6 +589,8 @@ static DWORD WINAPI cancelioex_thread_func(void *arg)
ok(status == STATUS_CANCELLED, "Wrong iostatus %lx\n", status); ok(WaitForSingleObject(ctx->event, 0) == 0, "hEvent not signaled\n"); + ok(!cancel_sb.Status, "Unexpected Status %lx\n", cancel_sb.Status); + ok(!cancel_sb.Information, "Unexpected Information %Iu\n", cancel_sb.Information);
return 0; } @@ -617,6 +619,8 @@ static void test_cancelio(void) * but before peeking at Status here. */ status = iosb.Status; ok(!res, "NtCancelIoFile returned %lx\n", res); + ok(!cancel_sb.Status, "Unexpected Status %lx\n", cancel_sb.Status); + ok(!cancel_sb.Information, "Unexpected Information %Iu\n", cancel_sb.Information);
ok(status == STATUS_CANCELLED, "Wrong iostatus %lx\n", status); ok(WaitForSingleObject(hEvent, 0) == 0, "hEvent not signaled\n"); @@ -630,6 +634,8 @@ static void test_cancelio(void) res = pNtCancelIoFile(hPipe, &cancel_sb); ok(!res, "NtCancelIoFile returned %lx\n", res); ok(iosb.Status == STATUS_CANCELLED, "Wrong iostatus %lx\n", iosb.Status); + ok(!cancel_sb.Status, "Unexpected Status %lx\n", cancel_sb.Status); + ok(!cancel_sb.Information, "Unexpected Information %Iu\n", cancel_sb.Information);
CloseHandle(hPipe);
@@ -645,6 +651,8 @@ static void test_cancelio(void) res = pNtCancelIoFileEx(hPipe, &iosb, &cancel_sb); status = iosb.Status; ok(!res, "NtCancelIoFileEx returned %lx\n", res); + ok(!cancel_sb.Status, "Unexpected Status %lx\n", cancel_sb.Status); + ok(!cancel_sb.Information, "Unexpected Information %Iu\n", cancel_sb.Information);
ok(status == STATUS_CANCELLED, "Wrong iostatus %lx\n", status); ok(WaitForSingleObject(hEvent, 0) == 0, "hEvent not signaled\n"); @@ -653,6 +661,8 @@ static void test_cancelio(void) res = pNtCancelIoFileEx(hPipe, NULL, &cancel_sb); ok(res == STATUS_NOT_FOUND, "NtCancelIoFileEx returned %lx\n", res); ok(iosb.Status == 0xdeadbeef, "Wrong iostatus %lx\n", iosb.Status); + ok(cancel_sb.Status == STATUS_NOT_FOUND, "Unexpected Status %lx\n", cancel_sb.Status); + ok(!cancel_sb.Information, "Unexpected Information %Iu\n", cancel_sb.Information);
memset(&iosb, 0x55, sizeof(iosb)); res = listen_pipe(hPipe, hEvent, &iosb, FALSE); @@ -739,6 +749,8 @@ static void test_cancelsynchronousio(void) ok(ret == WAIT_TIMEOUT, "WaitForSingleObject returned %lu (error %lu)\n", ret, GetLastError()); memset(&iosb, 0x55, sizeof(iosb)); res = pNtCancelSynchronousIoFile(thread, NULL, &iosb); + ok(ctx.iosb.Status == 0xdeadbabe, "Unexpected Status %lx\n", ctx.iosb.Status); + ok(ctx.iosb.Information == 0xdeadbeef, "Unexpected Information %Iu\n", ctx.iosb.Information); ok(res == STATUS_SUCCESS, "Failed to cancel I/O\n"); ok(iosb.Status == STATUS_SUCCESS, "iosb.Status got changed to %lx\n", iosb.Status); ok(iosb.Information == 0, "iosb.Information got changed to %Iu\n", iosb.Information);
From: Matteo Bruni mbruni@codeweavers.com
Also adjust xinput to handle that. --- dlls/hidclass.sys/device.c | 2 +- dlls/xinput1_3/main.c | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/dlls/hidclass.sys/device.c b/dlls/hidclass.sys/device.c index cb65391e443..852c8ebfbff 100644 --- a/dlls/hidclass.sys/device.c +++ b/dlls/hidclass.sys/device.c @@ -122,7 +122,7 @@ void hid_queue_remove_pending_irps( struct hid_queue *queue )
while ((irp = hid_queue_pop_irp( queue ))) { - irp->IoStatus.Status = STATUS_DELETE_PENDING; + irp->IoStatus.Status = STATUS_DEVICE_NOT_CONNECTED; IoCompleteRequest( irp, IO_NO_INCREMENT ); } } diff --git a/dlls/xinput1_3/main.c b/dlls/xinput1_3/main.c index 28f57e93bce..379a1d2f0b8 100644 --- a/dlls/xinput1_3/main.c +++ b/dlls/xinput1_3/main.c @@ -573,7 +573,11 @@ static void read_controller_state(struct xinput_controller *controller) if (!GetOverlappedResult(controller->device, &controller->hid.read_ovl, &read_len, TRUE)) { if (GetLastError() == ERROR_OPERATION_ABORTED) return; - if (GetLastError() == ERROR_ACCESS_DENIED || GetLastError() == ERROR_INVALID_HANDLE) controller_destroy(controller, TRUE); + if (GetLastError() == ERROR_ACCESS_DENIED || GetLastError() == ERROR_INVALID_HANDLE || + GetLastError() == ERROR_DEVICE_NOT_CONNECTED) + { + controller_destroy(controller, TRUE); + } else ERR("Failed to read input report, GetOverlappedResult failed with error %lu\n", GetLastError()); return; }
Paul Gofman (@gofman) commented about dlls/ntdll/unix/server.c:
result->async_io.total = info; /* the server will pass us NULL if a call failed synchronously */ set_async_iosb( call->async_io.sb, result->async_io.status, info );
//TRACE("call->async_io.sb %#llx, status %#x, result->async_io.status %#x\n", call->async_io.sb, status, result->async_io.status);
/* if ( call->async_io.sb && !in_wow64_call()) */
/* { */
/* IO_STATUS_BLOCK *io = wine_server_get_ptr( call->async_io.sb ); */
/* TRACE( "&io->Status %p, io->Status %#x\n", &io->Status, io->Status ); */
/* } */
if (status == STATUS_CANCELLED)
What is the rationale behind handling STATUS_CANCELLED on the client and separately relaying to the server? In any case, the server gets the IOSB result relayed back from system APC processing. Regardless, the server knows the async's cancelled status explicitly. In either case, should it not stick to its explicit knowledge that the async is cancelled (and not rely on STATUS_CANCELLED in the IOSB, which might be set arbitrarily by the app or a driver under some different circumstances)? I suppose one way or another, all that logic should be contained inside wineserver, and it should signal the cancel wait object based on its knowledge that the async was cancelled (be it the explicit flag as it is now, or derived in some other way, like checking the cancelled async queue, as the flag seems to be going away in previous patches for some reason).
On Tue Jul 22 20:26:02 2025 +0000, Paul Gofman wrote:
What is the rationale behind handling STATUS_CANCELLED on the client and separately relaying to the server? In any case, the server gets the IOSB result relayed back from system APC processing. Regardless, the server knows the async's cancelled status explicitly. In either case, should it not stick to its explicit knowledge that the async is cancelled (and not rely on STATUS_CANCELLED in the IOSB, which might be set arbitrarily by the app or a driver under some different circumstances)? I suppose one way or another, all that logic should be contained inside wineserver, and it should signal the cancel wait object based on its knowledge that the async was cancelled (be it the explicit flag as it is now, or derived in some other way, like checking the cancelled async queue, as the flag seems to be going away in previous patches for some reason).
Besides the fact that cancellation handling doesn't seem to belong to the client or to the IOSB status, what will happen with the NtCancelIo wait if the thread or process which is supposed to notify the cancel gets killed?
On Tue Jul 22 20:30:14 2025 +0000, Paul Gofman wrote:
Besides that cancellation handling doesn't seem to belong to client and to IOSB status, what will happen with NtCancelIo wait if the thread or process which is supposed to notify cancel gets killed?
Also, maybe there is a simpler way to achieve the same? Ref https://gitlab.winehq.org/wine/wine/-/merge_requests/8592 . It is not exactly related, as it touches different cases — in particular, NtCancelSynchronousIoFile() handling WRT the synchronous IO wait, but not NtCancelIoFile with asynchronous IO. But is there anything which would prevent, in principle, creating and relaying to NtCancelIoFile the same wait_handle for the async being cancelled, and then just performing wait_async() on it the same way as with synchronous IO? Presumably the cancellation could then go without many changes and complications on the server side or elsewhere. It would still need a way to wait for multiple asyncs, but then maybe cancel_async() could return the handles for all of them at once.
On Tue Jul 22 20:46:26 2025 +0000, Paul Gofman wrote:
Also, maybe there is a simpler way to achieve the same? Ref https://gitlab.winehq.org/wine/wine/-/merge_requests/8592 . It is not exactly related, as it touches different cases — in particular, NtCancelSynchronousIoFile() handling WRT the synchronous IO wait, but not NtCancelIoFile with asynchronous IO. But is there anything which would prevent, in principle, creating and relaying to NtCancelIoFile the same wait_handle for the async being cancelled, and then just performing wait_async() on it the same way as with synchronous IO? Presumably the cancellation could then go without many changes and complications on the server side or elsewhere. It would still need a way to wait for multiple asyncs, but then maybe cancel_async() could return the handles for all of them at once.
Note that async's wait_handle signaling logic should probably account for everything already WRT synchronous IO wait (in particular, terminating wait on cancelled but not before the cancellation is processed and IOSB is filled). So why not to use exact same mechanics here if we need to wait for async to conclude after cancellation request in NtCancelIoFile?
I suppose one or another way, all that logic should be contained inside wineserver and it should be signaling cancel wait object based on its knowledge that async canceled
I have to recheck but I think that would signal too early? We want to signal the cancel as completed only after all the `iosb.Status` are set on the client.
(would it be the explicit flag as it is now or derived in some other way, like checking the canceled async queue, as the flag seems to be going away in previous patches for some reason).
That's a patch from Rémi's gitlab, he rearranged the previous version of this MR here: https://gitlab.winehq.org/rbernon/wine/-/commits/mr/7797
Besides that cancellation handling doesn't seem to belong to client and to IOSB status, what will happen with NtCancelIo wait if the thread or process which is supposed to notify cancel gets killed?
Process termination should be no concern since `NtCancelIoFile[Ex]()` are restricted to the same process in the first place. Valid point about thread termination though, thanks.
[...] So why not to use exact same mechanics here if we need to wait for async to conclude after cancellation request in NtCancelIoFile?
That sounds like the previous version of this MR (I think you can still see it here: https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?commit_id=40..., or from Rémi's gitlab as above), which indeed was not very nice WRT passing and waiting on multiple handles.
Also, maybe there is simplier way to achieve the same? Ref https://gitlab.winehq.org/wine/wine/-/merge_requests/8592
I noticed your MR but didn't get to look at it in any detail, I'll do it now.
That sounds like the previous version of this MR (I think you can still see it here: https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?commit_id=40..., or from Rémi's gitlab as above), which indeed was not very nice WRT passing and waiting on multiple handles.
If waiting for multiple handles is the issue (why?) then maybe some aggregate wait handle can be introduced (not sure that is needed though). But why that would warrant the whole parallel async termination wait object and logic implementation? Current wait_handle and the logic behind it should guarantee that it is signaled when async is cancelled and after the async is processed on client / IOSB filled (or client died). If that doesn't work this way then it should be fixed for it in the first place (but I think it works).
On Tue Jul 22 21:23:46 2025 +0000, Paul Gofman wrote:
That sounds like the previous version of this MR (I think you can
still see it here: https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?commit_id=40..., or from Rémi's gitlab as above), which indeed was not very nice WRT passing and waiting on multiple handles. If waiting for multiple handles is the issue (why?) then maybe some aggregate wait handle can be introduced (not sure that is needed though). But why that would warrant the whole parallel async termination wait object and logic implementation? Current wait_handle and the logic behind it should guarantee that it is signaled when async is cancelled and after the async is processed on client / IOSB filled (or client died). If that doesn't work this way then it should be fixed for it in the first place (but I think it works).
Note also that it is not needed to do NtWaitForMultipleObjects for multiple handles (which maybe can be unfortunate in the view of nsync, and anyway there is a limit on the number of handles there). It is possible to wait for single object one after another.
If waiting for multiple handles is the issue (why?)
A couple of reasons: ferrying a variable-size handle list from the server to the client feels awkward and waiting on the multiple handles can be tricky [does this work? https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?commit_id=40...].
Those have been mentioned in https://gitlab.winehq.org/wine/wine/-/merge_requests/7797#note_102534 and https://gitlab.winehq.org/wine/wine/-/merge_requests/7797#note_101439
Note also that it is not needed to do NtWaitForMultipleObjects for multiple handles (which maybe can be unfortunate in the view of nsync, and anyway there is a limit on the number of handles there). It is possible to wait for single object one after another.
I suggest you look at the previous version of this MR, it was probably much closer to what you expect.
Let me know if you think I should go back to the old way altogether.
On Tue Jul 22 21:48:01 2025 +0000, Matteo Bruni wrote:
If waiting for multiple handles is the issue (why?)
A couple of reasons: ferrying a variable-size handle list from the server to the client feels awkward and waiting on the multiple handles can be tricky [does this work? https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?commit_id=40...]. Those have been mentioned in https://gitlab.winehq.org/wine/wine/-/merge_requests/7797#note_102534 and https://gitlab.winehq.org/wine/wine/-/merge_requests/7797#note_101439
Note also that it is not needed to do NtWaitForMultipleObjects for
multiple handles (which maybe can be unfortunate in the view of nsync, and anyway there is a limit on the number of handles there). It is possible to wait for single object one after another. I suggest you look at the previous version of this MR, it was probably much closer to what you expect. Let me know if you think I should go back to the old way altogether.
Actually I could push a rebased version of the old version here and open a new MR for this new approach. It should allow for easier comparison between the two, which could be useful?
A couple of reasons: ferrying a variable-size handle list from the server to the client feels awkward
It is easier of course when a variable-size server reply can be avoided, but it is used on many occasions in Wine. I guess the real question is which is more awkward: using the variable handle list, or implementing a whole new wait object and the logic behind it in the server, which to a large extent duplicates the universal async wait_handle rules?
waiting on the multiple handles can be tricky
'NtWaitForMultipleObjects( count, handles, FALSE, TRUE, NULL );' like in the original MR should probably not be used (for one, there is a limit on the handle count). But what is the problem with waiting for each handle one after another?
Anyway, if for some reason that multiple handles are considered a big deal, that could be some event on the server signaled based on those wait_asyncs just handled internally and not introducing new entities related to specifically cancelled asyncs. But so far I honestly don't see how that can be less awkward.
Let me know if you think I should go back to the old way altogether.
Well, it needs a much closer look at the details, which I haven't done (just e.g., it probably can't first cancel asyncs and then fail with `STATUS_BUFFER_OVERFLOW`; that should be checked first, because otherwise the asyncs are already cancelled and the client can't retry; also, what ensures proper wait_handle lifetime, and whether the 'blocking' checks interfere, since wait_handle is currently only used for those, I think). In principle I personally think it is much better, yes. Maybe we should ask for @zfigura's opinion, as she's been doing a lot around those asyncs and reviewing all the patches so far.
Well, it needs much more look for details which I didn't (just e. g., it probably can't first cancel asyncs and then fail with `STATUS_BUFFER_OVERFLOW`, that should be checked first because otherwise asyncs are already canceled and client can't retry
That should be handled correctly IIRC.
One of the concerns with the old approach was [https://gitlab.winehq.org/wine/wine/-/merge_requests/7797#note_102524]. If that's actually considered not a worry I can basically restore the very first version of this MR (which also waited for the handles one by one IIRC) and be done with it.
On Tue Jul 22 22:17:49 2025 +0000, Matteo Bruni wrote:
Well, it needs much more look for details which I didn't (just e. g.,
it probably can't first cancel asyncs and then fail with `STATUS_BUFFER_OVERFLOW`, that should be checked first because otherwise asyncs are already canceled and client can't retry That should be handled correctly IIRC. One of the concerns with the old approach was [https://gitlab.winehq.org/wine/wine/-/merge_requests/7797#note_102524]. If that's actually considered not a worry I can basically restore the very first version of this MR (which also waited for the handles one by one IIRC) and be done with it.
That was [https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?diff_id=1701...] / [https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?diff_id=1701...], for reference.
On Tue Jul 22 22:23:54 2025 +0000, Matteo Bruni wrote:
That was [https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?diff_id=1701...] / [https://gitlab.winehq.org/wine/wine/-/merge_requests/7797/diffs?diff_id=1701...], for reference.
Sorry, which one (the link covers discussion for different topics, can't easily deduce where exactly it points to). Some guesses:
After the async has been changed to one of the "signaled" state, it's probably not supposed to go back to a state where it's not signaled. Mapping this to your state machine it's not a valid state transition as it wasn't done anywhere
I think of course async should not go back to 'unsignaled' state ever. But it should be going to signaled only once IOSB is delivered to client (AFAIU that is currently the case). And then it is the matter of wait_handle lifetime, not of unsignalling the async.
Then, I failed to find the place where waiting for the handles sequentially is questioned, but I suspect the only possible issue there again comes down to wait_handle lifetime and making sure it is not closed prematurely? While it should probably be created only on cancellation requests, in the cases where it does not exist yet.
Sorry, which one (the link covers discussion for different topics, can't easily deduce where exactly it points to).
Sorry, gitlab sucks for this :rolling_eyes: The first one, which you guessed correctly, was:
Well, it's not really about the `signaled` meaning, but about resetting it arbitrarily just so that the cancelling thread can wait on it.
After the async has been changed to one of the "signaled" state, it's probably not supposed to go back to a state where it's not signaled.
Which is seen at line +616 in server/async.c (version 1 of the MR, you can get to that from the Changes tab if you select "version 1" from the second dropdown menu).
I think of course async should not go back to 'unsignaled' state ever. But it should be going to signaled only once IOSB is delivered to client (AFAIU that is currently the case).
It does not. The async is set to `signaled` right away, so that Status can be set to STATUS_PENDING and subsequently checked via `GetOverlappedResult()`. But I see the point. I'll look for some way to do it without "unsignaling" the async.
The other one I have no problem with. I originally waited for the handles one by one with server_select() directly, like from line +6464 of dlls/ntdll/unix/file.c onwards (again "version 1"). I changed that to `NtWaitForMultipleObjects()` afterwards because of some review comment.
I'll look for some way to do it without "unsignaling" the async.
The other thing to be careful about is to not "satisfy" the wait for the final IOSB Status (i.e. we want the application to still be able to wait on the file handle or the OVERLAPPED event after `NtCancelIoFile()` returns).
It does not. The async is set to `signaled` right away, so that Status can be set to STATUS_PENDING and subsequently checked via `GetOverlappedResult()`. But I see the point. I'll look for some way to do it without "unsignaling" the async.
If that's the case, this is also a problem for sync IO, which can in theory run away from the wait before the IOSB status is set? Maybe it is hard to hit in practice (or even impossible before ntsync) because the system APC with the IO completion is delivered to the waiting thread. But I think we want the same for the sync IO wait as well: the async should only be signaled once the IOSB is delivered. There is no reason this should work differently for the cancellation wait, right?
The other thing to be careful about is to not "satisfy" the wait for the final IOSB Status (i.e. we want the application to still be able to wait on the file handle or the OVERLAPPED event after `NtCancelIoFile()` returns).
Do we care when the wait completion for the IO caller happens WRT the waits in NtCancelIo? If this is async IO, the caller should never meet the async's wait object. I suppose it is not detectable whether NtCancelIo exited before the wait on the overlapped event or for IO completion, or after, so do we care whether the IO completion is performed when async_wait is satisfied for the NtCancelIo wait, or before that, when the server processed the cancellation?
It does not. The async is set to `signaled` right away, so that Status can be set to STATUS_PENDING and subsequently checked via `GetOverlappedResult()`. But I see the point. I'll look for some way to do it without "unsignaling" the async.
If that's the case, this is also a problem for sync IO, which can in theory run away from the wait before the IOSB status is set? Maybe it is hard to hit in practice (or even impossible before ntsync) because the system APC with the IO completion is delivered to the waiting thread. But I think we want the same for the sync IO wait as well: the async should only be signaled once the IOSB is delivered. There is no reason this should work differently for the cancellation wait, right?
IIRC for synchronous IO it depends on a number of factors but I think it can happen in theory. Looking at `async_handoff`, it looks like device reads (which have `unknown_status` set) will always take the async path, server-side. The other side is that actually async reads are sometimes effectively synchronous (plain file access generally does that, I think?) So we currently signal the async right away to communicate if we have a result right away (status != `STATUS_PENDING`) or not.
That's my understanding at least. We could certainly change the async signaling semantics but we need to change all that depends on the current semantics as well.
The other thing to be careful about is to not "satisfy" the wait for the final IOSB Status (i.e. we want the application to still be able to wait on the file handle or the OVERLAPPED event after `NtCancelIoFile()` returns).
Do we care when the wait completion for the IO caller happens WRT to the waits in NtCancelIO? If this is async IO the caller should never meet async's wait object. I suppose it is not detectable whether NtCancelIO exited before wait on overlapped event or for IO completion or after, so do we care if IO completion is performed from async_wait been satisfied for NtCancelIO wait or before that when server processed cancellation?
You're probably right. One less thing to worry about! :smile:
On Tue Jul 22 23:34:30 2025 +0000, Matteo Bruni wrote:
It does not. The async is set to `signaled` right away, so that
Status can be set to STATUS_PENDING and subsequently checked via `GetOverlappedResult()`. But I see the point. I'll look for some way to do it without "unsignaling" the async.
If that's the case, this is also a problem for sync IO, which can in
theory run away from the wait before the IOSB status is set? Maybe it is hard to hit in practice (or even impossible before ntsync) because the system APC with the IO completion is delivered to the waiting thread. But I think we want the same for the sync IO wait as well: the async should only be signaled once the IOSB is delivered. There is no reason this should work differently for the cancellation wait, right? IIRC for synchronous IO it depends on a number of factors but I think it can happen in theory. Looking at `async_handoff`, it looks like device reads (which have `unknown_status` set) will always take the async path, server-side. The other side is that asynchronous reads are actually sometimes effectively synchronous (plain file access generally does that, I think?) So we currently signal the async right away to communicate whether we have a result right away (status != `STATUS_PENDING`) or not. That's my understanding at least. We could certainly change the async signaling semantics, but then we need to change everything that depends on the current semantics as well.
The other thing to be careful about is to not "satisfy" the wait for
the final IOSB Status (i.e. we want the application to still be able to wait on the file handle or the OVERLAPPED event after `NtCancelIoFile()` returns).
Do we care when the wait completion for the IO caller happens WRT
the waits in NtCancelIO? If this is async IO the caller should never meet the async's wait object. I suppose it is not detectable whether NtCancelIO exited before the wait on the overlapped event or IO completion, or after, so do we care whether IO completion is performed after the async's wait is satisfied for the NtCancelIO wait, or before that, when the server processed the cancellation? You're probably right. One less thing to worry about! :smile:
I probably haven't read the whole conversation completely, but I'm going to try to weigh in regardless:
* Asyncs are signaled when the client can return from the dispatcher function. That means:
- If the async is blocking, the async object is signaled only once the whole async is completed, and that includes filling the IOSB, if the IOSB needs to be filled—and note that in the case of synchronous failure it will *not* be filled.
- If the async is nonblocking, the async object is signaled once the initial status is known, but the async itself is not necessarily complete yet. Note also that the wait on the async returns with this initial status, which can be literally anything. This is a lightweight way of returning that information that doesn't need a whole system APC. There are two cases where the initial status is not known immediately:
* new-style async I/O that uses set_async_direct_result. This doesn't really matter because the client won't be waiting on the async handle until after it calls set_async_direct_result(), which will signal the async handle.
* device files (only once the IRP dispatcher returns). This is the only really important case.
This means that you *cannot* wait on an async to determine when it is cancelled. If it was an overlapped file handle to a device, the async handle will already be signalled. You could designal the async when cancelling it, but...
* I feel fairly strongly that async handles should not be designalled. I think everyone else in this thread has expressed the same.
Combined with the above, that means we *need* a new object to determine when an async is truly done.
On Wed Jul 23 00:39:47 2025 +0000, Elizabeth Figura wrote:
I probably haven't read the whole conversation completely, but I'm going to try to weigh in regardless:
- Asyncs are signaled when the client can return from the dispatcher
function. That means:
- If the async is blocking, the async object is signaled only once the
whole async is completed, and that includes filling the IOSB, if the IOSB needs to be filled—and note that in the case of synchronous failure it will *not* be filled.
- If the async is nonblocking, the async object is signaled once the
initial status is known, but the async itself is not necessarily complete yet. Note also that the wait on the async returns with this initial status, which can be literally anything. This is a lightweight way of returning that information that doesn't need a whole system APC. There are two cases where the initial status is not known immediately: * new-style async I/O that uses set_async_direct_result. This doesn't really matter because the client won't be waiting on the async handle until after it calls set_async_direct_result(), which will signal the async handle. * device files (only once the IRP dispatcher returns). This is the only really important case. This means that you *cannot* wait on an async to determine when it is cancelled. If it was an overlapped file handle to a device, the async handle will already be signalled. You could designal the async when cancelling it, but...
- I feel fairly strongly that async handles should not be designalled. I
think everyone else in this thread has expressed the same. Combined with the above, that means we *need* a new object to determine when an async is truly done.
I see, thanks. But even if we need a separate sync object to mark async completion, the server should have everything needed to know when to signal it, including the result of the system APC which should deliver the IOSB status. Do we really need a distinct server call to deliver the notification of async completion from the client?
I see, thanks. But even if we need a separate sync object to mark async completion, the server should have everything needed to know when to signal it, including the result of the system APC which should deliver the IOSB status. Do we really need a distinct server call to deliver the notification of async completion from the client?
No, we don't, and I apologize, I should have noticed and replied to that point as well.
The server is made aware of async completion after the IOSB (and output buffers, if applicable) are filled. That's either done by waiting on the async handle itself (synchronous completion for old-style client-driven I/O, as currently used by serials), by calling set_async_direct_result (synchronous completion for new-style client-driven I/O, as currently used by sockets), or by closing the APC_ASYNC_IO thread APC handle by returning back to select (asynchronous completion).
This is necessary because completion has to be signaled (event, IOCP, user APC) after the IOSB and buffers are filled. All of this is done in async_set_result(); cancellation can also be handled there.
It's probably worth also noting, as I look at the code, that checking for STATUS_CANCELLED is neither sufficient (cancelled asyncs can still continue as normal for various reasons) nor necessary (STATUS_CANCELLED can be returned for asyncs that are not formally cancelled with NtCancelIo etc.)
The server is made aware of async completion after the IOSB (and output buffers, if applicable) are filled.
I originally started without the new server call (see e.g. the direct `async_complete_cancel()` call still in `set_async_direct_result()`), then I think I didn't find out all the cases so I added the new server call.
or by closing the APC_ASYNC_IO thread APC handle by returning back to select (asynchronous completion).
Probably I missed this one at least.
This is necessary because completion has to be signaled (event, IOCP, user APC) after the IOSB and buffers are filled. All of this is done in async_set_result(); cancellation can also be handled there.
I'll move the `async_complete_cancel()` call there and see what happens. Thanks a lot!
It's probably worth also noting, as I look at the code, that checking for STATUS_CANCELLED is neither sufficient (cancelled asyncs can still continue as normal for various reasons) nor necessary (STATUS_CANCELLED can be returned for asyncs that are not formally cancelled with NtCancelIo etc.)
Right, I think I should just get rid of the check and call `async_complete_cancel()` unconditionally.