Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 62 +++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 37 insertions(+), 25 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 44c57dca358d..c5a476e6c068 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -523,7 +523,7 @@ struct io_uring_task {
 	spinlock_t		task_lock;
 	struct io_wq_work_list	task_list;
-	struct io_wq_work_list	prior_task_list;
+	struct io_wq_work_list	prio_task_list;
 	struct callback_head	task_work;
 	struct file		**registered_rings;
 	bool			task_running;
@@ -2893,10 +2893,10 @@ static void tctx_task_work(struct callback_head *cb)
 		struct io_wq_work_node *node1, *node2;

 		spin_lock_irq(&tctx->task_lock);
-		node1 = tctx->prior_task_list.first;
+		node1 = tctx->prio_task_list.first;
 		node2 = tctx->task_list.first;
 		INIT_WQ_LIST(&tctx->task_list);
-		INIT_WQ_LIST(&tctx->prior_task_list);
+		INIT_WQ_LIST(&tctx->prio_task_list);
 		if (!node2 && !node1)
 			tctx->task_running = false;
 		spin_unlock_irq(&tctx->task_lock);
@@ -2910,7 +2910,7 @@ static void tctx_task_work(struct callback_head *cb)
 		cond_resched();

 		if (data_race(!tctx->task_list.first) &&
-		    data_race(!tctx->prior_task_list.first) && uring_locked)
+		    data_race(!tctx->prio_task_list.first) && uring_locked)
 			io_submit_flush_completions(ctx);
 	}
@@ -2921,24 +2921,19 @@ static void tctx_task_work(struct callback_head *cb)
 		io_uring_drop_tctx_refs(current);
 }

-static void io_req_task_work_add(struct io_kiocb *req, bool priority)
+static void __io_req_task_work_add(struct io_kiocb *req,
+				   struct io_uring_task *tctx,
+				   struct io_wq_work_list *list)
 {
-	struct task_struct *tsk = req->task;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_task *tctx = tsk->io_uring;
 	struct io_wq_work_node *node;
 	unsigned long flags;
 	bool running;

-	WARN_ON_ONCE(!tctx);
-
 	io_drop_inflight_file(req);

 	spin_lock_irqsave(&tctx->task_lock, flags);
-	if (priority)
-		wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
-	else
-		wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
+	wq_list_add_tail(&req->io_task_work.node, list);
 	running = tctx->task_running;
 	if (!running)
 		tctx->task_running = true;
@@ -2951,12 +2946,12 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

-	if (likely(!task_work_add(tsk, &tctx->task_work, ctx->notify_method)))
+	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;

 	spin_lock_irqsave(&tctx->task_lock, flags);
 	tctx->task_running = false;
-	node = wq_list_merge(&tctx->prior_task_list, &tctx->task_list);
+	node = wq_list_merge(&tctx->prio_task_list, &tctx->task_list);
 	spin_unlock_irqrestore(&tctx->task_lock, flags);

 	while (node) {
@@ -2968,6 +2963,23 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 	}
 }

+static void io_req_task_work_add(struct io_kiocb *req)
+{
+	struct io_uring_task *tctx = req->task->io_uring;
+
+	__io_req_task_work_add(req, tctx, &tctx->task_list);
+}
+
+static void io_req_task_prio_work_add(struct io_kiocb *req)
+{
+	struct io_uring_task *tctx = req->task->io_uring;
+
+	if (req->ctx->flags & IORING_SETUP_SQPOLL)
+		__io_req_task_work_add(req, tctx, &tctx->prio_task_list);
+	else
+		__io_req_task_work_add(req, tctx, &tctx->task_list);
+}
+
 static void io_req_tw_post(struct io_kiocb *req, bool *locked)
 {
 	io_req_complete_post(req, req->cqe.res, req->cqe.flags);
@@ -2978,7 +2990,7 @@ static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
 	req->cqe.res = res;
 	req->cqe.flags = cflags;
 	req->io_task_work.func = io_req_tw_post;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }

 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
@@ -3002,19 +3014,19 @@ static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 {
 	req->cqe.res = ret;
 	req->io_task_work.func = io_req_task_cancel;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }

 static void io_req_task_queue(struct io_kiocb *req)
 {
 	req->io_task_work.func = io_req_task_submit;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }

 static void io_req_task_queue_reissue(struct io_kiocb *req)
 {
 	req->io_task_work.func = io_queue_iowq;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }

 static void io_queue_next(struct io_kiocb *req)
@@ -3422,7 +3434,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 		return;
 	req->cqe.res = res;
 	req->io_task_work.func = io_req_task_complete;
-	io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
+	io_req_task_prio_work_add(req);
 }

 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
@@ -4924,7 +4936,7 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
 	req->uring_cmd.task_work_cb = task_work_cb;
 	req->io_task_work.func = io_uring_cmd_work;
-	io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
+	io_req_task_prio_work_add(req);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
@@ -6764,7 +6776,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
 		req->io_task_work.func = io_apoll_task_func;

 	trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }

 static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
@@ -7265,7 +7277,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	req->cqe.res = -ETIME;
 	req->io_task_work.func = io_req_task_complete;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
@@ -8385,7 +8397,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

 	req->io_task_work.func = io_req_task_link_timeout;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
@@ -10066,7 +10078,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
 	task->io_uring = tctx;
 	spin_lock_init(&tctx->task_lock);
 	INIT_WQ_LIST(&tctx->task_list);
-	INIT_WQ_LIST(&tctx->prior_task_list);
+	INIT_WQ_LIST(&tctx->prio_task_list);
 	init_task_work(&tctx->task_work, tctx_task_work);
 	return 0;
 }
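
Taken together, the patch renames prior_task_list to prio_task_list and replaces the old io_req_task_work_add(req, priority) bool flag with a __io_req_task_work_add() core that takes the target list explicitly, plus two named wrappers: io_req_task_work_add() for the normal list and io_req_task_prio_work_add(), which only uses the priority list under IORING_SETUP_SQPOLL. Below is a minimal userspace sketch of that shape; all types and names are simplified stand-ins for the kernel's io_wq_work_list machinery (locking and task_work notification elided), not the real API.

/*
 * Sketch of the pattern the patch settles on: one shared __add helper
 * that takes the target list, plus two thin wrappers that pick between
 * the normal and the priority list.
 */
#include <stdbool.h>
#include <stdio.h>

struct work_node {
	struct work_node *next;
	int id;
};

struct work_list {
	struct work_node *first;
	struct work_node *last;
};

#define SETUP_SQPOLL	(1U << 0)	/* stand-in for IORING_SETUP_SQPOLL */

struct task_ctx {
	struct work_list task_list;	 /* regular task work */
	struct work_list prio_task_list; /* drained first; SQPOLL completions */
	unsigned int flags;
	bool running;
};

static void list_add_tail(struct work_list *list, struct work_node *node)
{
	node->next = NULL;
	if (list->last)
		list->last->next = node;
	else
		list->first = node;
	list->last = node;
}

/* Mirrors __io_req_task_work_add(): the caller chooses the list. */
static void __work_add(struct task_ctx *tctx, struct work_node *node,
		       struct work_list *list)
{
	list_add_tail(list, node);
	tctx->running = true;	/* work is now pending for this task */
}

/* Mirrors io_req_task_work_add(): always the normal list. */
static void work_add(struct task_ctx *tctx, struct work_node *node)
{
	__work_add(tctx, node, &tctx->task_list);
}

/* Mirrors io_req_task_prio_work_add(): priority only under SQPOLL. */
static void prio_work_add(struct task_ctx *tctx, struct work_node *node)
{
	if (tctx->flags & SETUP_SQPOLL)
		__work_add(tctx, node, &tctx->prio_task_list);
	else
		__work_add(tctx, node, &tctx->task_list);
}

/* Mirrors tctx_task_work()'s ordering: the priority list runs first. */
static void drain(struct task_ctx *tctx)
{
	struct work_node *node;

	for (node = tctx->prio_task_list.first; node; node = node->next)
		printf("prio work %d\n", node->id);
	for (node = tctx->task_list.first; node; node = node->next)
		printf("work %d\n", node->id);
	tctx->running = false;
}

int main(void)
{
	struct task_ctx tctx = { .flags = SETUP_SQPOLL };
	struct work_node a = { .id = 1 }, b = { .id = 2 };

	work_add(&tctx, &a);		/* lands on task_list */
	prio_work_add(&tctx, &b);	/* lands on prio_task_list */
	drain(&tctx);			/* prints "prio work 2", then "work 1" */
	return 0;
}

The wrapper split trades a bool argument at every call site for a named entry point, and moves the IORING_SETUP_SQPOLL test out of the callers (see the io_complete_rw and io_uring_cmd_complete_in_task hunks above) into one place.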