author    Ming Lei <ming.lei@redhat.com>  2024-05-17 10:05:14 +0800
committer Jens Axboe <axboe@kernel.dk>    2024-05-17 09:40:26 -0600
commit    7b815817aa58d2e2101feb2fcf64c60cae0b2695 (patch)
tree      697362701100e5e1714f5f792d9d55db8785bc43 /block
parent    9d230c09964e6e18c8f6e4f0d41ee90eef45ec1c (diff)
blk-mq: add helper for checking if one CPU is mapped to specified hctx
Commit a46c27026da1 ("blk-mq: don't schedule block kworker on isolated CPUs") rules out isolated CPUs from hctx->cpumask, so hctx->cpumask should only be used for scheduling the kworker.

Add the helper blk_mq_cpu_mapped_to_hctx() and apply it in the cpuhp handlers.

This ensures BLK_MQ_S_INACTIVE is cleared from hctx->state when an isolated CPU comes online, and fixes a hang when allocating requests from this hctx's tags.

Cc: Raju Cheerla <rcheerla@redhat.com>
Fixes: a46c27026da1 ("blk-mq: don't schedule block kworker on isolated CPUs")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20240517020514.149771-1-ming.lei@redhat.com
Tested-by: Raju Cheerla <rcheerla@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
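The core of the fix is easiest to see in a small, self-contained model. The sketch below is not kernel code: the map table, the cpumask array, the CPU count and all identifiers (cpu_to_hctx, hctx1_cpumask, cpu_mapped_to_hctx) are invented stand-ins for blk_mq_map_queue_type() and hctx->cpumask, used only to show why a cpumask-based test gives the wrong answer for an isolated CPU in the cpuhp handlers.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* CPU -> hctx index; stands in for the queue map consulted by
 * blk_mq_map_queue_type(), which covers isolated CPUs too. */
static const int cpu_to_hctx[NR_CPUS] = { 0, 0, 1, 1 };

/* Stand-in for hctx->cpumask of hctx 1, with isolated CPU 3 ruled out. */
static const bool hctx1_cpumask[NR_CPUS] = { false, false, true, false };

/* Map-based check, the approach taken by blk_mq_cpu_mapped_to_hctx(). */
static bool cpu_mapped_to_hctx(int cpu, int hctx_idx)
{
	return cpu_to_hctx[cpu] == hctx_idx;
}

int main(void)
{
	int cpu = 3;	/* isolated CPU coming online */

	/* Old check: CPU 3 is not in the cpumask, so the online handler
	 * would skip clearing BLK_MQ_S_INACTIVE and the hctx stays dead. */
	printf("cpumask check: %d\n", hctx1_cpumask[cpu]);

	/* New check: the map still routes CPU 3 to hctx 1, so the handler
	 * runs and requests can be allocated from this hctx again. */
	printf("map check:     %d\n", cpu_mapped_to_hctx(cpu, 1));
	return 0;
}

With CPU 3 isolated, the first check prints 0 while the second prints 1, which is exactly the discrepancy the patch closes.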
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c | 20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8e01e4b32e10..579921a2f1c6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3545,12 +3545,28 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
+/*
+ * Check if one CPU is mapped to the specified hctx
+ *
+ * Isolated CPUs have been ruled out from hctx->cpumask, which is supposed
+ * to be used for scheduling kworker only. For other usage, please call this
+ * helper for checking if one CPU belongs to the specified hctx
+ */
+static bool blk_mq_cpu_mapped_to_hctx(unsigned int cpu,
+		const struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_hw_ctx *mapped_hctx = blk_mq_map_queue_type(hctx->queue,
+			hctx->type, cpu);
+
+	return mapped_hctx == hctx;
+}
+
 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
 {
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);
 
-	if (cpumask_test_cpu(cpu, hctx->cpumask))
+	if (blk_mq_cpu_mapped_to_hctx(cpu, hctx))
 		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
 	return 0;
 }
@@ -3568,7 +3584,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	enum hctx_type type;
 
 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
-	if (!cpumask_test_cpu(cpu, hctx->cpumask))
+	if (!blk_mq_cpu_mapped_to_hctx(cpu, hctx))
 		return 0;
 
 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
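For completeness, here is a similarly illustrative, non-kernel sketch of the handler side of the change: the inactive flag stands in for BLK_MQ_S_INACTIVE, and notify_online mirrors only the shape of blk_mq_hctx_notify_online() after the patch. The struct, the map table and the CPU numbers are assumptions of the toy model, not blk-mq code.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct blk_mq_hw_ctx: only the pieces needed here. */
struct toy_hctx {
	int idx;
	bool inactive;	/* models BLK_MQ_S_INACTIVE in hctx->state */
};

/* CPU -> hctx index; the map includes isolated CPUs. */
static const int cpu_to_hctx[] = { 0, 0, 1, 1 };

static bool cpu_mapped_to_hctx(int cpu, const struct toy_hctx *hctx)
{
	return cpu_to_hctx[cpu] == hctx->idx;
}

/* Mirrors the shape of blk_mq_hctx_notify_online() after the patch. */
static void notify_online(int cpu, struct toy_hctx *hctx)
{
	if (cpu_mapped_to_hctx(cpu, hctx))
		hctx->inactive = false;
}

int main(void)
{
	/* hctx 1 was marked inactive when its CPUs went offline;
	 * now isolated CPU 3 comes back online. */
	struct toy_hctx hctx1 = { .idx = 1, .inactive = true };

	notify_online(3, &hctx1);
	printf("hctx1 inactive after online: %d\n", hctx1.inactive);
	return 0;
}

With the old cpumask-based condition the flag would have stayed set here, and allocating requests from this hctx's tags would block indefinitely, which is the hang the commit message describes.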