summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2012-08-21 22:06:55 +0200
committerJens Axboe <axboe@kernel.dk>2012-10-04 11:04:37 +0200
commit6e8a32aa291d2970ef0d32c287d2e91dc7601ac9 (patch)
tree5ea39c781160e6f0fce7730bfad1800ff0a11768
parent5fc24696e746e766e6b5e3e2187dbc75a69501e2 (diff)
blk-mq: get rid of passing in a lock to blk_mq_init_queue()
It was a disaster on the API side for blk_init_queue(), and in the current code we don't even use the functionality anymore. So let's get rid of this, along with the BLK_MQ_F_SHOULD_LOCK flag. Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-mq-sysfs.c8
-rw-r--r--block/blk-mq.c17
-rw-r--r--drivers/block/null.c2
-rw-r--r--drivers/block/virtio_blk.c4
-rw-r--r--include/linux/blk-mq.h6
5 files changed, 11 insertions, 26 deletions
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b52b3b713606..3d889b8c655e 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -187,9 +187,9 @@ static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
{
ssize_t ret;
- spin_lock_irq(hctx->lock);
+ spin_lock_irq(&hctx->lock);
ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
- spin_unlock_irq(hctx->lock);
+ spin_unlock_irq(&hctx->lock);
return ret;
}
@@ -204,12 +204,12 @@ static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
ret = simple_strtoul(p, &p, 10);
- spin_lock_irq(hctx->lock);
+ spin_lock_irq(&hctx->lock);
if (ret)
hctx->flags |= BLK_MQ_F_SHOULD_IPI;
else
hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
- spin_unlock_irq(hctx->lock);
+ spin_unlock_irq(&hctx->lock);
hctx_for_each_ctx(hctx, ctx, i)
ctx->ipi_redirect = !!ret;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bca900ab03f0..35a1c9272a44 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -315,7 +315,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
struct blk_mq_ctx *ctx;
struct request *rq;
struct llist_node *first, *last = NULL;
- unsigned long flags = 0;
LLIST_HEAD(rq_list);
LIST_HEAD(tmp);
int bit, queued;
@@ -366,9 +365,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
/*
* Now process all the entries, sending them to the driver.
*/
- if (hctx->flags & BLK_MQ_F_SHOULD_LOCK)
- spin_lock_irqsave(hctx->lock, flags);
-
while (first) {
struct llist_node *entry;
int ret;
@@ -398,9 +394,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
}
}
- if (hctx->flags & BLK_MQ_F_SHOULD_LOCK)
- spin_unlock_irqrestore(hctx->lock, flags);
-
if (!queued)
hctx->dispatched[0]++;
else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
@@ -646,8 +639,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx)
return 0;
}
-struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
- spinlock_t *lock)
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg)
{
struct blk_mq_hw_ctx **hctxs;
struct blk_mq_hw_ctx *hctx;
@@ -724,18 +716,13 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
unsigned int num_maps;
INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn);
- spin_lock_init(&hctx->__lock);
+ spin_lock_init(&hctx->lock);
init_llist_head(&hctx->dispatch);
hctx->queue = q;
hctx->flags = reg->flags;
hctx->queue_depth = reg->queue_depth;
hctx->numa_node = reg->numa_node;
- if (!lock)
- hctx->lock = &hctx->__lock;
- else
- hctx->lock = lock;
-
/* FIXME: alloc failure handling */
blk_mq_init_rq_map(hctx);
diff --git a/drivers/block/null.c b/drivers/block/null.c
index 66770b5d245c..1d1073b24b42 100644
--- a/drivers/block/null.c
+++ b/drivers/block/null.c
@@ -278,7 +278,7 @@ static int null_add_dev(void)
null_mq_reg.nr_hw_queues = submit_queues;
}
- nullb->q = blk_mq_init_queue(&null_mq_reg, &nullb->lock);
+ nullb->q = blk_mq_init_queue(&null_mq_reg);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
if (nullb->q)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b7c425e3c446..1651c51889b7 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -497,7 +497,7 @@ static struct blk_mq_reg virtio_mq_reg = {
.nr_hw_queues = 1,
.queue_depth = 64,
.numa_node = NUMA_NO_NODE,
- .flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SHOULD_LOCK,
+ .flags = BLK_MQ_F_SHOULD_MERGE,
};
static int __devinit virtblk_probe(struct virtio_device *vdev)
@@ -558,7 +558,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
goto out_mempool;
}
- q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, NULL);
+ q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg);
if (!q) {
err = -ENOMEM;
goto out_put_disk;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aa84a58d8d9..319d3a8cf076 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -23,8 +23,7 @@ struct blk_mq_ctx {
};
struct blk_mq_hw_ctx {
- spinlock_t __lock;
- spinlock_t *lock;
+ spinlock_t lock;
struct llist_head dispatch;
struct delayed_work delayed_work;
@@ -83,12 +82,11 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_SHOULD_SORT = 1 << 1,
BLK_MQ_F_SHOULD_IPI = 1 << 2,
- BLK_MQ_F_SHOULD_LOCK = 1 << 3, /* lock on queue_rq invocation */
BLK_MQ_MAX_DEPTH = 2048,
};
-struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, spinlock_t *);
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *);
void blk_mq_free_queue(struct request_queue *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);