author	Pavel Begunkov <asml.silence@gmail.com>	2023-08-24 23:53:29 +0100
committer	Jens Axboe <axboe@kernel.dk>	2023-08-24 17:16:19 -0600
commit	ec26c225f06f5993f8891fa6c79fab3c92981181 (patch)
tree	ac8f3e43760f70381360a290840a743955e8278a /io_uring/io_uring.c
parent	54927baf6c195fb512ac38b26a041ca44edb2e29 (diff)
io_uring: merge iopoll and normal completion paths
io_do_iopoll() and io_submit_flush_completions() are pretty similar, both filling CQEs and then freeing a list of requests. Don't duplicate it; make iopoll use __io_submit_flush_completions(), which also helps with inlining and other optimisations.

For that, we need to first find all completed iopoll requests, splice them from the iopoll list and then pass them down. This adds one extra list traversal, which should be fine as the requests will stay hot in cache.

CQ locking is already conditional; introduce ->lockless_cq and skip locking for IOPOLL, as it's protected by ->uring_lock. We also add a wakeup optimisation for IOPOLL to __io_cq_unlock_post(), so it works just like io_cqring_ev_posted_iopoll().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/3840473f5e8a960de35b77292026691880f6bdbc.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
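Since the diffstat below is limited to io_uring.c, the iopoll side of the change (io_do_iopoll() in io_uring/rw.c) is not shown on this page. A rough sketch of the splice-then-flush shape the message describes, written against the in-tree slist helpers; the function name is made up for illustration and this is not the literal patch:

/*
 * Sketch only: the real version is io_do_iopoll() in io_uring/rw.c.
 * Pass 1 finds the contiguous run of completed iopoll requests,
 * pass 2 splices them off and reuses the common completion path.
 */
static int iopoll_reap_sketch(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *pos, *prev;
	int nr_events = 0;

	/* Pass 1: walk completed requests at the head of the list. */
	wq_list_for_each(pos, prev, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb,
						    comp_list);

		/* pairs with the store from the iopoll completion side */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
	}
	if (!nr_events)
		return 0;

	/*
	 * Pass 2: splice the completed run (head through prev) from the
	 * iopoll list and hand it to the common flush path, which fills
	 * the CQEs and frees the batch. This is the one extra traversal
	 * the message mentions; the requests are still hot in cache.
	 */
	pos = ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, NULL);
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}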
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--	io_uring/io_uring.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7c1ef5b6628d..e8321903e3f3 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -147,7 +147,6 @@ static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 					 struct task_struct *task,
 					 bool cancel_all);
 
 static void io_queue_sqe(struct io_kiocb *req);
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
 
 struct kmem_cache *req_cachep;
@@ -616,7 +615,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 
 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
 {
-	if (!ctx->task_complete)
+	if (!ctx->lockless_cq)
 		spin_lock(&ctx->completion_lock);
 }
 
@@ -630,8 +629,11 @@ static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
 {
 	io_commit_cqring(ctx);
 	if (!ctx->task_complete) {
-		spin_unlock(&ctx->completion_lock);
-		io_cqring_wake(ctx);
+		if (!ctx->lockless_cq)
+			spin_unlock(&ctx->completion_lock);
+		/* IOPOLL rings only need to wake up if it's also SQPOLL */
+		if (!ctx->syscall_iopoll)
+			io_cqring_wake(ctx);
 	}
 	io_commit_cqring_flush(ctx);
 }
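For context on the wakeup comment above: a plain IOPOLL ring has nobody sleeping on the CQ waitqueue, because the task that submitted the I/O is the one that reaps completions by polling, so posting a CQE has no one to wake unless SQPOLL is also set. A minimal userspace sketch of that pattern, assuming liburing and an O_DIRECT-capable device; the device path is a placeholder:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char *buf;
	int fd;

	/* IOPOLL requires O_DIRECT I/O on a device that supports polling */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;
	if (posix_memalign((void **)&buf, 4096, 4096))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/*
	 * Under IOPOLL this call drives io_do_iopoll() in the kernel
	 * rather than sleeping on the CQ waitqueue, which is why the
	 * ->syscall_iopoll check above can skip io_cqring_wake().
	 */
	if (!io_uring_wait_cqe(&ring, &cqe))
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}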
@@ -1485,7 +1487,8 @@ void io_queue_next(struct io_kiocb *req)
 		io_req_task_queue(nxt);
 }
 
-void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
+static void io_free_batch_list(struct io_ring_ctx *ctx,
+			       struct io_wq_work_node *node)
 	__must_hold(&ctx->uring_lock)
 {
 	do {
@@ -1522,7 +1525,7 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 	} while (node);
 }
 
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
+void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_submit_state *state = &ctx->submit_state;
@@ -3836,6 +3839,9 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
 	    !(ctx->flags & IORING_SETUP_SQPOLL))
 		ctx->task_complete = true;
 
+	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
+		ctx->lockless_cq = true;
+
 	/*
 	 * lazy poll_wq activation relies on ->task_complete for synchronisation
 	 * purposes, see io_activate_pollwq()
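Taken together, the hunk above means the completion lock is now taken only when neither single-threaded case applies. A hypothetical predicate capturing the rule, for illustration only (not a helper from the patch):

/* Hypothetical illustration of the ->lockless_cq rule set up above. */
static inline bool cq_needs_lock(struct io_ring_ctx *ctx)
{
	/*
	 * DEFER_TASKRUN completions run in the submitting task
	 * (->task_complete); IOPOLL completions run under ->uring_lock.
	 * Either way the CQ is effectively single-threaded, so only
	 * the remaining configurations need ->completion_lock.
	 */
	return !ctx->task_complete && !(ctx->flags & IORING_SETUP_IOPOLL);
}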