author	zhangyi (F) <yi.zhang@huawei.com>	2019-10-23 15:10:08 +0800
committer	Jens Axboe <axboe@kernel.dk>	2019-10-23 22:09:56 -0600
commit	ef03681ae8df770745978148a7fb84796ae99cba (patch)
tree	3112e7810ebfb63944675e06320061f98b723c02
parent	bc808bced39f4e4b626c5ea8c63d5e41fce7205a (diff)
io_uring: correct timeout req sequence when waiting timeout
The sequence numbers of the reqs on the timeout_list before the timeout req should be adjusted in io_timeout_fn(), because the current timeout req will consume a slot in the cq_ring and the cq_tail pointer will be increased; otherwise other timeout reqs may return in advance without waiting for enough wait_nr.

Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io_uring.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 08c2c428e212..b65a68582a7c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1877,7 +1877,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
struct io_ring_ctx *ctx;
- struct io_kiocb *req;
+ struct io_kiocb *req, *prev;
unsigned long flags;
req = container_of(timer, struct io_kiocb, timeout.timer);
@@ -1885,6 +1885,15 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
atomic_inc(&ctx->cq_timeouts);
spin_lock_irqsave(&ctx->completion_lock, flags);
+ /*
+ * Adjust the reqs sequence before the current one because it
+ * will consume a slot in the cq_ring and the cq_tail pointer
+ * will be increased, otherwise other timeout reqs may return in
+ * advance without waiting for enough wait_nr.
+ */
+ prev = req;
+ list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+ prev->sequence++;
list_del(&req->list);
io_cqring_fill_event(ctx, req->user_data, -ETIME);
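
To make the bookkeeping in the hunk above concrete, here is a minimal userspace sketch, not part of the patch or of the io_uring code: when one timeout request fires it consumes a CQ slot, so every timeout request queued before it must have its target sequence bumped by one, or it would fire after fewer completions than asked for. The struct, function, and array names below are hypothetical illustrations; the kernel walks struct io_kiocb entries on ctx->timeout_list with list_for_each_entry_continue_reverse() instead of an array.

/* Hypothetical userspace illustration -- not kernel code. */
#include <stdio.h>

struct timeout_req {
	unsigned int sequence;	/* completion count this timeout waits for */
};

/*
 * When the timeout at index 'firing' posts its own CQE, it uses up one
 * completion slot, so every timeout queued before it must now wait for
 * one more completion to see the same number of real events.
 */
static void adjust_prior_timeouts(struct timeout_req *reqs, int firing)
{
	for (int i = 0; i < firing; i++)
		reqs[i].sequence++;
}

int main(void)
{
	struct timeout_req reqs[3] = { { 10 }, { 12 }, { 15 } };

	adjust_prior_timeouts(reqs, 2);	/* reqs[2] fires and posts a CQE */

	for (int i = 0; i < 3; i++)
		printf("req %d now waits for sequence %u\n", i, reqs[i].sequence);
	return 0;
}

The array walk plays the role of the reverse list traversal in the patch: only the requests queued before the firing one are touched, and each simply has its sequence incremented by one per consumed CQ slot.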