author     Jens Axboe <axboe@kernel.dk>  2020-02-04 16:48:34 -0700
committer  Jens Axboe <axboe@kernel.dk>  2020-02-04 16:48:34 -0700
commit     df069d80c8e38c19531c392322e9a16617475c44 (patch)
tree       714dcb977224e28c8f08af8ed9db505f2226c915 /fs/io_uring.c
parent     01d7a356872eec22ef34a33a5f9cfa917d145468 (diff)
io_uring: spin for sq thread to idle on shutdown
As part of io_uring shutdown, we cancel work that is pending and won't necessarily complete on its own. That includes requests like poll commands and timeouts. If we're using SQPOLL for kernel-side submission and we shut down the ring immediately after queueing such work, we can race with the sqthread doing the submission. This means we may miss cancelling some work, which results in the io_uring shutdown hanging forever.

Cc: stable@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
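The racy sequence is short enough to sketch from userspace. Below is a minimal, hypothetical reproducer assuming liburing (io_uring_queue_init(), io_uring_prep_timeout(), and io_uring_queue_exit() are liburing's names, not part of this commit): create a ring with IORING_SETUP_SQPOLL, queue a timeout that won't complete on its own, and tear the ring down immediately, so ring exit can race with the sqthread still picking up the submission.

/* repro.c -- illustrative sketch, assumes liburing; not part of this commit.
 * Build: gcc repro.c -luring. SQPOLL needs privileges (CAP_SYS_ADMIN) on
 * kernels of this era.
 */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct __kernel_timespec ts = { .tv_sec = 60 };
	int ret;

	/* SQPOLL: a kernel thread (the sqthread) submits on our behalf. */
	ret = io_uring_queue_init(8, &ring, IORING_SETUP_SQPOLL);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	/* Queue a timeout that won't fire for a while: pending work that
	 * shutdown must cancel rather than wait out. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	io_uring_submit(&ring);

	/* Tear the ring down immediately. On an affected kernel, exit can
	 * race with the sqthread, miss the cancellation, and hang forever. */
	io_uring_queue_exit(&ring);
	return 0;
}

With this patch applied, the exit path first waits for the sqthread to go idle and only then cancels the pending timeout, so the teardown above completes.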
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index edb00ae2619b..87f8655656b5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5070,7 +5070,8 @@ static int io_sq_thread(void *data)
 		 * reap events and wake us up.
 		 */
 		if (inflight ||
-		    (!time_after(jiffies, timeout) && ret != -EBUSY)) {
+		    (!time_after(jiffies, timeout) && ret != -EBUSY &&
+		     !percpu_ref_is_dying(&ctx->refs))) {
 			cond_resched();
 			continue;
 		}
@@ -6324,6 +6325,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	percpu_ref_kill(&ctx->refs);
 	mutex_unlock(&ctx->uring_lock);
 
+	/*
+	 * Wait for sq thread to idle, if we have one. It won't spin on new
+	 * work after we've killed the ctx ref above. This is important to do
+	 * before we cancel existing commands, as the thread could otherwise
+	 * be queueing new work post that. If that's work we need to cancel,
+	 * it could cause shutdown to hang.
+	 */
+	while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
+		cpu_relax();
+
 	io_kill_timeouts(ctx);
 	io_poll_remove_all(ctx);
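Taken together, the two hunks form a shutdown handshake: the sqthread's busy loop gains a !percpu_ref_is_dying() exit condition so it parks once the ctx ref has been killed, and io_ring_ctx_wait_and_kill() spins with cpu_relax() until wq_has_sleeper() confirms the thread is asleep before it starts cancelling. A minimal userspace analogue of that ordering, sketched with C11 atomics and pthreads (the names dying, parked, and worker are illustrative, not from the kernel):

/* handshake.c -- userspace analogue of the patch's shutdown handshake.
 * Hypothetical illustration; not kernel code. Build: gcc handshake.c -pthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

static atomic_bool dying;   /* plays the role of percpu_ref_is_dying() */
static atomic_bool parked;  /* plays the role of wq_has_sleeper()      */

static void *worker(void *arg)
{
	(void)arg;
	/* Busy loop, like io_sq_thread(): keep spinning for work, but stop
	 * as soon as shutdown marks the context as dying. */
	while (!atomic_load(&dying))
		sched_yield();          /* stand-in for cond_resched() */

	/* Signal that no further work will be queued, then go idle. */
	atomic_store(&parked, true);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, worker, NULL);

	/* Shutdown side, mirroring io_ring_ctx_wait_and_kill(): first mark
	 * the context dying, then spin until the worker is provably idle,
	 * and only then cancel pending work. */
	atomic_store(&dying, true);
	while (!atomic_load(&parked))
		;                       /* stand-in for cpu_relax() */

	puts("worker parked; safe to cancel pending work");
	pthread_join(thr, NULL);
	return 0;
}

The ordering matters for the same reason as in the kernel: cancel before the worker is provably idle and a late submission can slip in after the cancellation pass, which is exactly the hang the commit message describes.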