author | Pavel Begunkov <asml.silence@gmail.com> | 2025-05-09 12:12:51 +0100
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2025-05-09 08:01:02 -0600
commit | 19a94da447f832ee614f8f5532d31c1c70061520 (patch) |
tree | 292178a7aa733a844698658eaef075786ab7331d |
parent | e91e4f692f7993d5d192228c5f8a9a2e12ff5250 (diff) |
io_uring: consolidate drain seq checking
We check sequences when queuing drained requests as well as when flushing
them. Instead, always queue and immediately try to flush, so that all
seq handling can be kept contained in the flushing code.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d4651f742e671af5b3216581e539ea5d31bc7125.1746788718.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
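To make the "always queue, then flush" flow concrete, here is a minimal userspace sketch of the control-flow change described above. It is illustration only: the types and helpers (drain_entry, drain_queue, queue_drained, flush_drained) are hypothetical stand-ins, not io_uring code, and the sequence check deliberately lives only in the flush routine.

```c
/* Minimal userspace model of "always queue, then flush".
 * All names here are made up for illustration. */
#include <stdio.h>
#include <stdlib.h>

struct drain_entry {
	unsigned int seq;		/* sequence the request waits for */
	struct drain_entry *next;
};

struct drain_queue {
	unsigned int completed;		/* completions seen so far */
	struct drain_entry *head, *tail;
};

/* The sequence check lives only here, in the flushing code. */
static void flush_drained(struct drain_queue *q)
{
	while (q->head && q->head->seq <= q->completed) {
		struct drain_entry *de = q->head;

		q->head = de->next;
		if (!q->head)
			q->tail = NULL;
		printf("issuing request with seq %u\n", de->seq);
		free(de);
	}
}

/* Queuing no longer checks the sequence: append, then try to flush. */
static void queue_drained(struct drain_queue *q, unsigned int seq)
{
	struct drain_entry *de = malloc(sizeof(*de));

	if (!de)
		return;
	de->seq = seq;
	de->next = NULL;
	if (q->tail)
		q->tail->next = de;
	else
		q->head = de;
	q->tail = de;
	flush_drained(q);
}

int main(void)
{
	struct drain_queue q = { .completed = 1 };

	queue_drained(&q, 1);	/* ready: flushed immediately */
	queue_drained(&q, 3);	/* not ready: stays queued */
	q.completed = 3;
	flush_drained(&q);	/* now the second request is issued */
	return 0;
}
```

In the actual patch, the flush side is __io_queue_deferred()/io_drain_defer_seq() and the queueing side is io_drain_req(), as the diff below shows.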
-rw-r--r-- | io_uring/io_uring.c | 45 |
1 file changed, 17 insertions, 28 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f83abdf8a056..3d1f4b2e4536 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -389,17 +389,6 @@ static void io_account_cq_overflow(struct io_ring_ctx *ctx)
 	ctx->cq_extra--;
 }
 
-static bool req_need_defer(struct io_kiocb *req, u32 seq)
-{
-	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
-		struct io_ring_ctx *ctx = req->ctx;
-
-		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
-	}
-
-	return false;
-}
-
 static void io_clean_op(struct io_kiocb *req)
 {
 	if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
@@ -566,11 +555,10 @@ static bool io_drain_defer_seq(struct io_kiocb *req, u32 seq)
 	return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
 }
 
-static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx)
+static __cold noinline void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
 	bool drain_seen = false, first = true;
 
-	spin_lock(&ctx->completion_lock);
 	while (!list_empty(&ctx->defer_list)) {
 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
 						struct io_defer_entry, list);
@@ -584,7 +572,12 @@ static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx)
 		kfree(de);
 		first = false;
 	}
-	spin_unlock(&ctx->completion_lock);
+}
+
+static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx)
+{
+	guard(spinlock)(&ctx->completion_lock);
+	__io_queue_deferred(ctx);
 }
 
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
@@ -1671,30 +1664,26 @@ static __cold void io_drain_req(struct io_kiocb *req)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	bool drain = req->flags & IOSQE_IO_DRAIN;
 	struct io_defer_entry *de;
-	u32 seq = io_get_sequence(req);
 
-	io_prep_async_link(req);
 	de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT);
 	if (!de) {
 		io_req_defer_failed(req, -ENOMEM);
 		return;
 	}
 
-	spin_lock(&ctx->completion_lock);
-	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
-		spin_unlock(&ctx->completion_lock);
-		kfree(de);
-		ctx->drain_active = false;
-		io_req_task_queue(req);
-		return;
-	}
-
+	io_prep_async_link(req);
 	trace_io_uring_defer(req);
 	de->req = req;
-	de->seq = seq;
-	list_add_tail(&de->list, &ctx->defer_list);
-	spin_unlock(&ctx->completion_lock);
+	de->seq = io_get_sequence(req);
+
+	scoped_guard(spinlock, &ctx->completion_lock) {
+		list_add_tail(&de->list, &ctx->defer_list);
+		__io_queue_deferred(ctx);
+		if (!drain && list_empty(&ctx->defer_list))
+			ctx->drain_active = false;
+	}
 }
 
 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
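The replacement code relies on the kernel's scope-based lock guards, guard() and scoped_guard() from include/linux/cleanup.h, which take the lock on entry and drop it automatically when the enclosing scope ends. As a rough userspace analogue (made-up names, a pthread mutex standing in for the completion_lock spinlock, GCC/Clang cleanup attribute), the same pattern looks like this:

```c
/* Userspace sketch of scope-based unlocking, loosely mirroring the
 * kernel's guard()/scoped_guard(); the names here are invented. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup callback: runs when the guarded pointer goes out of scope. */
static void mutex_unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define GUARD_CAT_(a, b)	a##b
#define GUARD_CAT(a, b)		GUARD_CAT_(a, b)

/* Lock now, and schedule the unlock for the end of the current scope. */
#define MUTEX_GUARD(m)							\
	pthread_mutex_t *GUARD_CAT(guard_, __LINE__)			\
		__attribute__((cleanup(mutex_unlock_cleanup))) =	\
		(pthread_mutex_lock(m), (m))

static int counter;

static void bump(void)
{
	MUTEX_GUARD(&lock);	/* cf. guard(spinlock)(&ctx->completion_lock) */
	counter++;
}				/* unlock runs here, on every return path */

int main(void)
{
	bump();
	bump();
	printf("counter = %d\n", counter);
	return 0;
}
```

The point of the sketch is simply that every exit path from the guarded scope drops the lock, which is what lets the patch fold the explicit spin_unlock() sites in io_queue_deferred() and io_drain_req() into the guarded scopes.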