author		Pavel Begunkov <asml.silence@gmail.com>	2023-01-05 11:22:27 +0000
committer	Jens Axboe <axboe@kernel.dk>	2023-01-29 15:17:40 -0700
commit		326a9e482e2134d7a44b7f8f9a721b38c6bbb146 (patch)
tree		10ff76aed7f6aaf0a8c25f14e72203b015b05641 /io_uring
parent		490c00eb4fa5e5e25e0127240f6d6c1b499da95b (diff)
io_uring: set TASK_RUNNING right after schedule
Instead of repeatedly checking that the task state is TASK_RUNNING before executing task_work or taking locks in io_cqring_wait(), switch it back to TASK_RUNNING immediately after the schedule point.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/246dddee247d89fd52023f785ed17cc34962a008.1672916894.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
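The idea generalizes to any prepare_to_wait()-style loop: rather than having every helper that runs after the schedule point check and restore the task state, the waiter flips itself back to TASK_RUNNING once, right after waking. Below is a minimal sketch of that pattern, not the io_uring code itself; my_wait_loop() and its function-pointer parameters are made-up names used only for illustration.

/*
 * Sketch of the pattern: restore TASK_RUNNING once after schedule()
 * so everything that follows can assume a runnable task.
 */
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

static int my_wait_loop(struct wait_queue_head *wq,
			struct wait_queue_entry *wait,
			bool (*should_wake)(void),
			void (*do_pending_work)(void))
{
	int ret = 0;

	do {
		prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		schedule();

		/* Awake again: mark ourselves runnable exactly once... */
		__set_current_state(TASK_RUNNING);

		/* ...so the work below may block or take locks freely. */
		do_pending_work();
	} while (!should_wake());

	/* finish_wait() also resets the task state on the break paths. */
	finish_wait(wq, wait);
	return ret;
}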
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 4cb9cce23c90..2ec011f0ba7d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2544,6 +2544,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
 		if (ret < 0)
 			break;
+		__set_current_state(TASK_RUNNING);
 		/*
 		 * Run task_work after scheduling and before io_should_wake().
 		 * If we got woken because of task_work being processed, run it
@@ -2556,10 +2557,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		check_cq = READ_ONCE(ctx->check_cq);
 		if (unlikely(check_cq)) {
 			/* let the caller flush overflows, retry */
-			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
-				finish_wait(&ctx->cq_wait, &iowq.wq);
+			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
 				io_cqring_do_overflow_flush(ctx);
-			}
 			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
 				ret = -EBADR;
 				break;
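Read together, the post-patch flow through this part of the wait loop looks roughly like the excerpt below, reconstructed from the two hunks above; the surrounding loop, declarations, and the task_work processing between the hunks are elided.

	ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
	if (ret < 0)
		break;
	/* back from the schedule point: runnable from here on */
	__set_current_state(TASK_RUNNING);

	/* ... task_work runs here without touching the task state ... */

	check_cq = READ_ONCE(ctx->check_cq);
	if (unlikely(check_cq)) {
		/* let the caller flush overflows, retry */
		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
			io_cqring_do_overflow_flush(ctx);
		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
			ret = -EBADR;
			break;
		}
	}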