@@ -274,7 +274,7 @@ struct io_sq_data {

 	unsigned long		state;
 	struct completion	startup;
-	struct completion	completion;
+	struct completion	parked;
 	struct completion	exited;
 };

@@ -6656,7 +6656,7 @@ static void io_sq_thread_parkme(struct io_sq_data *sqd)
 	 * wait_task_inactive().
 	 */
 	preempt_disable();
-	complete(&sqd->completion);
+	complete(&sqd->parked);
 	schedule_preempt_disabled();
 	preempt_enable();
 }
@@ -6751,14 +6751,18 @@ static int io_sq_thread(void *data)

 	io_run_task_work();

-	if (io_sq_thread_should_park(sqd))
-		io_sq_thread_parkme(sqd);
-
 	/*
-	 * Clear thread under lock so that concurrent parks work correctly
+	 * Ensure that we park properly if racing with someone trying to park
+	 * while we're exiting. If we fail to grab the lock, check park and
+	 * park if necessary. The ordering with the park bit and the lock
+	 * ensures that we catch this reliably.
 	 */
-	complete(&sqd->completion);
-	mutex_lock(&sqd->lock);
+	if (!mutex_trylock(&sqd->lock)) {
+		if (io_sq_thread_should_park(sqd))
+			io_sq_thread_parkme(sqd);
+		mutex_lock(&sqd->lock);
+	}
+
 	sqd->thread = NULL;
 	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
 		ctx->sqo_exec = 1;
@@ -7067,29 +7071,25 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 static void io_sq_thread_unpark(struct io_sq_data *sqd)
 	__releases(&sqd->lock)
 {
-	if (!sqd->thread)
-		return;
 	if (sqd->thread == current)
 		return;
 	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
-	wake_up_state(sqd->thread, TASK_PARKED);
+	if (sqd->thread)
+		wake_up_state(sqd->thread, TASK_PARKED);
 	mutex_unlock(&sqd->lock);
 }

-static bool io_sq_thread_park(struct io_sq_data *sqd)
+static void io_sq_thread_park(struct io_sq_data *sqd)
 	__acquires(&sqd->lock)
 {
 	if (sqd->thread == current)
-		return true;
+		return;
+	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
 	mutex_lock(&sqd->lock);
-	if (!sqd->thread) {
-		mutex_unlock(&sqd->lock);
-		return false;
+	if (sqd->thread) {
+		wake_up_process(sqd->thread);
+		wait_for_completion(&sqd->parked);
 	}
-	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
-	wake_up_process(sqd->thread);
-	wait_for_completion(&sqd->completion);
-	return true;
 }

 static void io_sq_thread_stop(struct io_sq_data *sqd)
@@ -7185,7 +7185,7 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
 	mutex_init(&sqd->lock);
 	init_waitqueue_head(&sqd->wait);
 	init_completion(&sqd->startup);
-	init_completion(&sqd->completion);
+	init_completion(&sqd->parked);
 	init_completion(&sqd->exited);
 	return sqd;
 }
@@ -7829,7 +7829,7 @@ static int io_sq_thread_fork(struct io_sq_data *sqd, struct io_ring_ctx *ctx)
 	int ret;

 	clear_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
-	reinit_completion(&sqd->completion);
+	reinit_completion(&sqd->parked);
 	ctx->sqo_exec = 0;
 	sqd->task_pid = current->pid;
 	tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
@@ -8712,19 +8712,17 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 					  struct files_struct *files)
 {
 	struct task_struct *task = current;
-	bool did_park = false;

 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
 		/* never started, nothing to cancel */
 		if (ctx->flags & IORING_SETUP_R_DISABLED) {
 			io_sq_offload_start(ctx);
 			return;
 		}
-		did_park = io_sq_thread_park(ctx->sq_data);
-		if (did_park) {
-			task = ctx->sq_data->thread;
+		io_sq_thread_park(ctx->sq_data);
+		task = ctx->sq_data->thread;
+		if (task)
 			atomic_inc(&task->io_uring->in_idle);
-		}
 	}

 	io_cancel_defer_files(ctx, task, files);
@@ -8733,10 +8731,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	if (!files)
 		io_uring_try_cancel_requests(ctx, task, NULL);

-	if (did_park) {
+	if (task)
 		atomic_dec(&task->io_uring->in_idle);
+	if (ctx->sq_data)
 		io_sq_thread_unpark(ctx->sq_data);
-	}
 }

 /*
@@ -8836,15 +8834,12 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)

 	if (!sqd)
 		return;
-	if (!io_sq_thread_park(sqd))
-		return;
-	tctx = ctx->sq_data->thread->io_uring;
-	/* can happen on fork/alloc failure, just ignore that state */
-	if (!tctx) {
+	io_sq_thread_park(sqd);
+	if (!sqd->thread || !sqd->thread->io_uring) {
 		io_sq_thread_unpark(sqd);
 		return;
 	}
-
+	tctx = ctx->sq_data->thread->io_uring;
 	atomic_inc(&tctx->in_idle);
 	do {
 		/* read completions before cancelations */
0 commit comments