author	Pavel Begunkov <asml.silence@gmail.com>	2020-06-15 10:24:02 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-06-15 08:51:33 -0600
commit	f4c2665e33f48904f2766d644df33fb3fd54b5ec (patch)
tree	7834e46a1346682d19424c2badfaca6e1b0075a8 /fs
parent	59960b9deb5354e4cdb0b6ed3a3b653a2b4eb602 (diff)
io-wq: reorder cancellation pending -> running
Go over all pending lists and cancel the works found there, and only then
try to match running requests. No functional changes here, just
preparation for bulk cancellation.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
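To make the new ordering concrete, here is a minimal stand-alone sketch of the control flow io_wq_cancel_cb() ends up with after this patch: sweep every node's pending list first, and only fall back to matching running work once no pending match exists anywhere. The types and helpers below (struct wqe_model, cancel_pending(), cancel_running(), match_id()) are simplified stand-ins for illustration, not the kernel's io-wq structures.

/*
 * Stand-alone model of the post-patch ordering in io_wq_cancel_cb().
 * struct wqe_model and the helpers are hypothetical stand-ins, not the
 * kernel's io-wq types; only the pending -> running ordering is the point.
 */
#include <stdbool.h>
#include <stdio.h>

enum wq_cancel { CANCEL_OK, CANCEL_RUNNING, CANCEL_NOTFOUND };

struct wqe_model {
	int pending[4];		/* queued work ids */
	int npending;
	int running;		/* work id a worker is executing, 0 = idle */
};

typedef bool (*work_cancel_fn)(int work_id, void *data);

static bool match_id(int work_id, void *data)
{
	return work_id == *(int *)data;
}

/* Phase 1: try to pull a matching work off one node's pending list. */
static bool cancel_pending(struct wqe_model *wqe, work_cancel_fn fn, void *data)
{
	for (int i = 0; i < wqe->npending; i++) {
		if (fn(wqe->pending[i], data)) {
			/* swap-remove: the work never ran */
			wqe->pending[i] = wqe->pending[--wqe->npending];
			return true;
		}
	}
	return false;
}

/* Phase 2: check whether a worker on this node is running a match. */
static bool cancel_running(struct wqe_model *wqe, work_cancel_fn fn, void *data)
{
	return wqe->running && fn(wqe->running, data);
}

static enum wq_cancel wq_cancel_cb(struct wqe_model *wqes, int nodes,
				   work_cancel_fn fn, void *data)
{
	/* All pending lists are swept before any running worker is
	 * considered; a hit here means no completion will be posted. */
	for (int n = 0; n < nodes; n++)
		if (cancel_pending(&wqes[n], fn, data))
			return CANCEL_OK;

	/* Running work can only be signalled; its completion still runs. */
	for (int n = 0; n < nodes; n++)
		if (cancel_running(&wqes[n], fn, data))
			return CANCEL_RUNNING;

	return CANCEL_NOTFOUND;
}

int main(void)
{
	struct wqe_model wqes[2] = {
		{ .pending = { 7 }, .npending = 1, .running = 3 },
		{ .npending = 0, .running = 7 },
	};
	int target = 7;

	/* Work 7 is pending on node 0 and running on node 1; the pending
	 * copy wins because every pending list is scanned first. */
	printf("%d\n", wq_cancel_cb(wqes, 2, match_id, &target)); /* 0 = CANCEL_OK */
	return 0;
}

With the old io_wqe_cancel_work(), the pending and running checks were interleaved per node, so a node earlier in the iteration could report CANCEL_RUNNING while a later node still held a match that was merely pending; splitting the phases keeps the cheap pending-removal case first and sets up cancelling pending works in bulk.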
Diffstat (limited to 'fs')
-rw-r--r--	fs/io-wq.c	54
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 0b65a912b036..03c7e37548c2 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -927,19 +927,14 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 	return ret;
 }
 
-static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
-					    struct io_cb_cancel_data *match)
+static bool io_wqe_cancel_pending_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match)
 {
 	struct io_wq_work_node *node, *prev;
 	struct io_wq_work *work;
 	unsigned long flags;
 	bool found = false;
 
-	/*
-	 * First check pending list, if we're lucky we can just remove it
-	 * from there. CANCEL_OK means that the work is returned as-new,
-	 * no completion will be posted for it.
-	 */
 	spin_lock_irqsave(&wqe->lock, flags);
 	wq_list_for_each(node, prev, &wqe->work_list) {
 		work = container_of(node, struct io_wq_work, list);
@@ -952,21 +947,20 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
 	}
 	spin_unlock_irqrestore(&wqe->lock, flags);
 
-	if (found) {
+	if (found)
 		io_run_cancel(work, wqe);
-		return IO_WQ_CANCEL_OK;
-	}
+	return found;
+}
+
+static bool io_wqe_cancel_running_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match)
+{
+	bool found;
 
-	/*
-	 * Now check if a free (going busy) or busy worker has the work
-	 * currently running. If we find it there, we'll return CANCEL_RUNNING
-	 * as an indication that we attempt to signal cancellation. The
-	 * completion will run normally in this case.
-	 */
 	rcu_read_lock();
 	found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
 	rcu_read_unlock();
-	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
+	return found;
 }
 
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
@@ -976,18 +970,34 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
 		.fn	= cancel,
 		.data	= data,
 	};
-	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
 	int node;
 
+	/*
+	 * First check pending list, if we're lucky we can just remove it
+	 * from there. CANCEL_OK means that the work is returned as-new,
+	 * no completion will be posted for it.
+	 */
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
 
-		ret = io_wqe_cancel_work(wqe, &match);
-		if (ret != IO_WQ_CANCEL_NOTFOUND)
-			break;
+		if (io_wqe_cancel_pending_work(wqe, &match))
+			return IO_WQ_CANCEL_OK;
 	}
 
-	return ret;
+	/*
+	 * Now check if a free (going busy) or busy worker has the work
+	 * currently running. If we find it there, we'll return CANCEL_RUNNING
+	 * as an indication that we attempt to signal cancellation. The
+	 * completion will run normally in this case.
+	 */
+	for_each_node(node) {
+		struct io_wqe *wqe = wq->wqes[node];
+
+		if (io_wqe_cancel_running_work(wqe, &match))
+			return IO_WQ_CANCEL_RUNNING;
+	}
+
+	return IO_WQ_CANCEL_NOTFOUND;
 }
 
 static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)