summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2022-06-25 11:52:59 +0100
committerJens Axboe <axboe@kernel.dk>2022-07-24 18:39:16 -0600
commit3218e5d32dbcf1b9c6dc589eca21deebb14215fa (patch)
tree59a08cec06c27462fa3fe1fdbdaf9104c1cb689e
parent37c7bd31b3e9e4b6aee3c5227f789c0b586a33a2 (diff)
io_uring: fuse fallback_node and normal tw node
Now that both the normal and fallback paths use llist, just keep one node head in struct io_task_work and kill off ->fallback_node.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d04ebde409f7b162fe247b361b4486b193293e46.1656153285.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--include/linux/io_uring_types.h5
-rw-r--r--io_uring/io_uring.c5
2 files changed, 3 insertions, 7 deletions
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 918165a20053..3ca8f363f504 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -427,10 +427,7 @@ enum {
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
struct io_task_work {
- union {
- struct llist_node node;
- struct llist_node fallback_node;
- };
+ struct llist_node node;
io_req_tw_func_t func;
};
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 45538b3c3a76..86a0b0c6f5bf 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -233,7 +233,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
bool locked = false;
percpu_ref_get(&ctx->refs);
- llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node)
+ llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
req->io_task_work.func(req, &locked);
if (locked) {
@@ -1091,13 +1091,12 @@ void io_req_task_work_add(struct io_kiocb *req)
if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
return;
-
node = llist_del_all(&tctx->task_list);
while (node) {
req = container_of(node, struct io_kiocb, io_task_work.node);
node = node->next;
- if (llist_add(&req->io_task_work.fallback_node,
+ if (llist_add(&req->io_task_work.node,
&req->ctx->fallback_llist))
schedule_delayed_work(&req->ctx->fallback_work, 1);
}