author     Jens Axboe <axboe@kernel.dk>    2021-03-05 08:14:08 -0700
committer  Jens Axboe <axboe@kernel.dk>    2021-03-05 08:44:09 -0700
commit     09ca6c40c2024211657fdb2c50522a355610c3b7 (patch)
tree       5f7e881811b2efdd4b8ba1240f61caaaea7ee0b1 /fs
parent     b5b0ecb736f1ce1e68eb50613c0cfecff10198eb (diff)
io-wq: kill hashed waitqueue before manager exits
If we race with shutting down the io-wq context and someone queueing a
hashed entry, then we can exit the manager with it armed. If it then
triggers after the manager has exited, we can have a use-after-free
where io_wqe_hash_wake() attempts to wake a now gone manager process.

Move the killing of the hashed waitqueue into the manager itself, so
that we know we've killed it before the task exits.

Fixes: e941894eae31 ("io-wq: make buffered file write hashed work map per-ctx")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
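The shape of the bug, and of the fix's ordering, is sketched below as a
minimal userspace analogue. None of these names come from the kernel:
wait_entry, add_entry, del_entry, wake_all, and wake_manager are invented
stand-ins for the waitqueue entry, list_del_init() under
wq->hash->wait.lock, and io_wqe_hash_wake(). The point is only that the
entry must be unhooked, under the same lock the waker takes, before the
object its callback touches goes away; the patch guarantees that by doing
the unhook in the manager task itself.

/*
 * Illustrative sketch only.  If main() freed the owner without
 * calling del_entry() first, a later wake_all() would dereference
 * freed memory -- the same shape as io_wqe_hash_wake() waking a
 * manager task that has already exited.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct wait_entry {
	struct wait_entry *next;
	void (*wake)(struct wait_entry *);
	void *owner;			/* stand-in for the manager task */
};

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static struct wait_entry *wait_head;

static void add_entry(struct wait_entry *e)
{
	pthread_mutex_lock(&wait_lock);
	e->next = wait_head;
	wait_head = e;
	pthread_mutex_unlock(&wait_lock);
}

/*
 * The core of the fix: unhook under the waker's lock, analogous to
 * list_del_init() under wq->hash->wait.lock in the patch.
 */
static void del_entry(struct wait_entry *e)
{
	struct wait_entry **p;

	pthread_mutex_lock(&wait_lock);
	for (p = &wait_head; *p; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;
			break;
		}
	}
	pthread_mutex_unlock(&wait_lock);
}

static void wake_all(void)
{
	struct wait_entry *e;

	pthread_mutex_lock(&wait_lock);
	for (e = wait_head; e; e = e->next)
		e->wake(e);	/* use-after-free if owner is gone */
	pthread_mutex_unlock(&wait_lock);
}

static void wake_manager(struct wait_entry *e)
{
	printf("waking manager at %p\n", e->owner);
}

int main(void)
{
	struct wait_entry entry = { .wake = wake_manager };

	entry.owner = malloc(64);
	add_entry(&entry);

	del_entry(&entry);	/* must happen before the owner dies */
	free(entry.owner);
	wake_all();		/* safe: entry is no longer queued */
	return 0;
}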
Diffstat (limited to 'fs')
-rw-r--r--  fs/io-wq.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index d7cfe8fd282a..28868eb4cd09 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -726,6 +726,11 @@ static int io_wq_manager(void *data)
 	if (atomic_read(&wq->worker_refs))
 		wait_for_completion(&wq->worker_done);
 
+	spin_lock_irq(&wq->hash->wait.lock);
+	for_each_node(node)
+		list_del_init(&wq->wqes[node]->wait.entry);
+	spin_unlock_irq(&wq->hash->wait.lock);
+
 	io_wq_cancel_pending(wq);
 	complete(&wq->exited);
 	do_exit(0);
@@ -1051,15 +1056,11 @@ static void io_wq_destroy(struct io_wq *wq)
 	set_bit(IO_WQ_BIT_EXIT, &wq->state);
 	io_wq_destroy_manager(wq);
 
-	spin_lock_irq(&wq->hash->wait.lock);
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
-
-		list_del_init(&wqe->wait.entry);
 		WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
 		kfree(wqe);
 	}
-	spin_unlock_irq(&wq->hash->wait.lock);
 	io_wq_put_hash(wq->hash);
 	kfree(wq->wqes);
 	kfree(wq);