Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  28
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f3570e81ecb4..b2736e3491b8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -321,7 +321,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
sizeof(struct io_kiocb));
ret |= io_futex_cache_init(ctx);
if (ret)
- goto err;
+ goto free_ref;
init_completion(&ctx->ref_comp);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
@@ -349,6 +349,9 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
io_napi_init(ctx);
return ctx;
+
+free_ref:
+ percpu_ref_exit(&ctx->refs);
err:
io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
io_alloc_cache_free(&ctx->apoll_cache, kfree);
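
The hunk above fixes the early-failure path in io_ring_ctx_alloc(): if the cache setup fails after the percpu ref has already been initialised, the old code jumped straight to err and never called percpu_ref_exit(), so the new free_ref label unwinds that step before falling through to the existing cleanup. A minimal userspace sketch of the same reverse-order goto unwinding, with made-up names (struct demo_ctx and plain malloc/free standing in for percpu_ref_init/exit and the caches, not io_uring APIs):

/* Hypothetical stand-ins for the kernel objects; only the unwind pattern
 * mirrors the patch above. */
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	int *ref;	/* stands in for ctx->refs */
	int *cache;	/* stands in for the alloc caches */
};

static struct demo_ctx *demo_ctx_alloc(int fail_cache)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;
	ctx->ref = malloc(sizeof(*ctx->ref));	/* like percpu_ref_init() */
	if (!ctx->ref)
		goto free_ctx;
	if (fail_cache)				/* like the cache init failing */
		goto free_ref;
	ctx->cache = malloc(64);
	if (!ctx->cache)
		goto free_ref;
	return ctx;

free_ref:
	free(ctx->ref);				/* like percpu_ref_exit() */
free_ctx:
	free(ctx);
	return NULL;
}

int main(void)
{
	struct demo_ctx *ctx = demo_ctx_alloc(0);

	printf("alloc %s\n", ctx ? "ok" : "failed");
	if (ctx) {
		free(ctx->cache);
		free(ctx->ref);
		free(ctx);
	}
	return 0;
}
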
@@ -635,6 +638,21 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
}
list_del(&ocqe->list);
kfree(ocqe);
+
+ /*
+ * For silly syzbot cases that deliberately overflow by huge
+ * amounts, check if we need to resched, and if so drop and
+ * reacquire the locks. Nothing real would ever hit this.
+ * Ideally we'd have a non-posting unlock for this, but hard
+ * to care for a non-real case.
+ */
+ if (need_resched()) {
+ io_cq_unlock_post(ctx);
+ mutex_unlock(&ctx->uring_lock);
+ cond_resched();
+ mutex_lock(&ctx->uring_lock);
+ io_cq_lock(ctx);
+ }
}
if (list_empty(&ctx->cq_overflow_list)) {
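
With the hunk above, draining a pathologically long overflow list can now drop both the CQ lock and uring_lock and cond_resched() instead of hogging the CPU with locks held. A rough userspace analogue of that drop-and-reacquire pattern, using a pthread mutex and sched_yield() as stand-ins for the ctx locks and cond_resched() (none of the names below are io_uring code):

/* Periodically release the lock and yield while draining a long list. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_ITEMS 1000000

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int items = NR_ITEMS;

static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (items > 0) {
		items--;			/* "free one overflow entry" */
		if ((items % 1024) == 0) {	/* crude stand-in for need_resched() */
			pthread_mutex_unlock(&lock);
			sched_yield();		/* stand-in for cond_resched() */
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	drain();
	printf("drained, items left: %d\n", items);
	return 0;
}
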
@@ -2023,7 +2041,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->opcode = opcode = READ_ONCE(sqe->opcode);
/* same numerical values with corresponding REQ_F_*, safe to copy */
sqe_flags = READ_ONCE(sqe->flags);
- req->flags = (io_req_flags_t) sqe_flags;
+ req->flags = (__force io_req_flags_t) sqe_flags;
req->cqe.user_data = READ_ONCE(sqe->user_data);
req->file = NULL;
req->rsrc_node = NULL;
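
The __force in the hunk above is a sparse annotation: io_req_flags_t is a __bitwise type, so converting the raw sqe->flags value into it needs an explicit force cast to keep sparse quiet. A self-contained sketch of that annotation pattern, with a hypothetical demo_flags_t and fallback defines so it also builds with a plain compiler:

/* Under sparse (__CHECKER__), assigning a plain integer to a __bitwise type
 * warns unless the conversion is marked __force; with a normal compiler the
 * attributes compile away. demo_flags_t is made up, not io_req_flags_t. */
#include <stdio.h>

#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise demo_flags_t;

int main(void)
{
	unsigned char raw = 0x05;				/* e.g. flags read from a shared ring */
	demo_flags_t flags = (__force demo_flags_t) raw;	/* explicit, sparse-clean conversion */

	printf("flags: 0x%x\n", (__force unsigned int) flags);
	return 0;
}
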
@@ -2164,7 +2182,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
* conditions are true (normal request), then just queue it.
*/
if (unlikely(link->head)) {
- trace_io_uring_link(req, link->head);
+ trace_io_uring_link(req, link->last);
link->last->link = req;
link->last = req;
@@ -2472,7 +2490,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
return 1;
if (unlikely(!llist_empty(&ctx->work_llist)))
return 1;
- if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
+ if (unlikely(task_work_pending(current)))
return 1;
if (unlikely(task_sigpending(current)))
return -EINTR;
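
The wait check above switches from testing TIF_NOTIFY_SIGNAL to task_work_pending(current), i.e. it looks at the queued task_work itself rather than only the notification flag. A conceptual userspace sketch of that difference, with made-up stand-ins for the task fields (not kernel structures):

/* struct fake_task and its fields are hypothetical; only the shape of the
 * check mirrors the change above. */
#include <stdio.h>

struct work_node { struct work_node *next; };

struct fake_task {
	unsigned long flags;			/* stand-in for thread-info flags */
	struct work_node *task_works;		/* stand-in for queued task_work */
};

#define FAKE_TIF_NOTIFY_SIGNAL	(1UL << 0)

/* old-style check: only sees work that also raised the notify flag */
static int notify_flag_set(struct fake_task *t)
{
	return !!(t->flags & FAKE_TIF_NOTIFY_SIGNAL);
}

/* new-style check: sees any queued work, flag or not */
static int work_pending(struct fake_task *t)
{
	return t->task_works != NULL;
}

int main(void)
{
	struct work_node node = { .next = NULL };
	struct fake_task t = { .flags = 0, .task_works = &node };

	printf("flag check: %d, list check: %d\n",
	       notify_flag_set(&t), work_pending(&t));
	return 0;
}
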
@@ -2579,9 +2597,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
* If we got woken because of task_work being processed, run it
* now rather than let the caller do another wait loop.
*/
- io_run_task_work();
if (!llist_empty(&ctx->work_llist))
io_run_local_work(ctx, nr_wait);
+ io_run_task_work();
/*
* Non-local task_work will be run on exit to userspace, but