From 723fbf563a6a9cefbd3c58e95694583ad1cb8704 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 15 Feb 2018 09:03:56 +0530 Subject: lib/scatterlist: Add SG_CHAIN and SG_END macros for LSB encodings This replaces scatterlist->page_link LSB encodings with SG_CHAIN and SG_END definitions without any functional change. Signed-off-by: Anshuman Khandual Signed-off-by: Jens Axboe --- include/linux/scatterlist.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 22b2131bcdcd..b6fe1815f5c4 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -65,16 +65,18 @@ struct sg_table { */ #define SG_MAGIC 0x87654321 +#define SG_CHAIN 0x01UL +#define SG_END 0x02UL /* * We overload the LSB of the page pointer to indicate whether it's * a valid sg entry, or whether it points to the start of a new scatterlist. * Those low bits are there for everyone! (thanks mason :-) */ -#define sg_is_chain(sg) ((sg)->page_link & 0x01) -#define sg_is_last(sg) ((sg)->page_link & 0x02) +#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN) +#define sg_is_last(sg) ((sg)->page_link & SG_END) #define sg_chain_ptr(sg) \ - ((struct scatterlist *) ((sg)->page_link & ~0x03)) + ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END))) /** * sg_assign_page - Assign a given page to an SG entry @@ -88,13 +90,13 @@ struct sg_table { **/ static inline void sg_assign_page(struct scatterlist *sg, struct page *page) { - unsigned long page_link = sg->page_link & 0x3; + unsigned long page_link = sg->page_link & (SG_CHAIN | SG_END); /* * In order for the low bit stealing approach to work, pages * must be aligned at a 32-bit boundary as a minimum. */ - BUG_ON((unsigned long) page & 0x03); + BUG_ON((unsigned long) page & (SG_CHAIN | SG_END)); #ifdef CONFIG_DEBUG_SG BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); @@ -130,7 +132,7 @@ static inline struct page *sg_page(struct scatterlist *sg) BUG_ON(sg->sg_magic != SG_MAGIC); BUG_ON(sg_is_chain(sg)); #endif - return (struct page *)((sg)->page_link & ~0x3); + return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END)); } /** @@ -178,7 +180,8 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, * Set lowest bit to indicate a link pointer, and make sure to clear * the termination bit if it happens to be set. 
*/ - prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; + prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN) + & ~SG_END; } /** @@ -198,8 +201,8 @@ static inline void sg_mark_end(struct scatterlist *sg) /* * Set termination bit, clear potential chain bit */ - sg->page_link |= 0x02; - sg->page_link &= ~0x01; + sg->page_link |= SG_END; + sg->page_link &= ~SG_CHAIN; } /** @@ -215,7 +218,7 @@ static inline void sg_unmark_end(struct scatterlist *sg) #ifdef CONFIG_DEBUG_SG BUG_ON(sg->sg_magic != SG_MAGIC); #endif - sg->page_link &= ~0x02; + sg->page_link &= ~SG_END; } /** -- cgit v1.2.3 From 025aecd8bdfed9ee1325d3f21d0f84b3622bdda5 Mon Sep 17 00:00:00 2001 From: Jiufei Xue Date: Wed, 28 Feb 2018 13:44:18 +0800 Subject: writeback: remove dead code in wb_blkcg/memcg_offline Signed-off-by: Jiufei Xue Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- mm/backing-dev.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/backing-dev.c b/mm/backing-dev.c index b5f940ce0143..d2984e9fcf08 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -745,7 +745,6 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi) */ void wb_memcg_offline(struct mem_cgroup *memcg) { - LIST_HEAD(to_destroy); struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg); struct bdi_writeback *wb, *next; @@ -764,7 +763,6 @@ void wb_memcg_offline(struct mem_cgroup *memcg) */ void wb_blkcg_offline(struct blkcg *blkcg) { - LIST_HEAD(to_destroy); struct bdi_writeback *wb, *next; spin_lock_irq(&cgwb_lock); -- cgit v1.2.3 From 1209cb7fa4afcd7f652a70e1d2028686ec28a7a5 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 27 Feb 2018 16:32:13 -0800 Subject: blk-mq-debugfs: Reorder queue show and store methods Make sure that the queue show and store methods are contiguous and also that these appear in alphabetical order. 
Signed-off-by: Bart Van Assche Cc: Omar Sandoval Cc: Damien Le Moal Cc: Ming Lei Cc: Hannes Reinecke Cc: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 130 ++++++++++++++++++++++++------------------------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 21cbc1f071c6..9547569aa619 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -24,6 +24,64 @@ #include "blk-mq-debugfs.h" #include "blk-mq-tag.h" +static void print_stat(struct seq_file *m, struct blk_rq_stat *stat) +{ + if (stat->nr_samples) { + seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu", + stat->nr_samples, stat->mean, stat->min, stat->max); + } else { + seq_puts(m, "samples=0"); + } +} + +static int queue_poll_stat_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + int bucket; + + for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) { + seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket)); + print_stat(m, &q->poll_stat[2*bucket]); + seq_puts(m, "\n"); + + seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket)); + print_stat(m, &q->poll_stat[2*bucket+1]); + seq_puts(m, "\n"); + } + return 0; +} + +static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos) + __acquires(&q->requeue_lock) +{ + struct request_queue *q = m->private; + + spin_lock_irq(&q->requeue_lock); + return seq_list_start(&q->requeue_list, *pos); +} + +static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct request_queue *q = m->private; + + return seq_list_next(v, &q->requeue_list, pos); +} + +static void queue_requeue_list_stop(struct seq_file *m, void *v) + __releases(&q->requeue_lock) +{ + struct request_queue *q = m->private; + + spin_unlock_irq(&q->requeue_lock); +} + +static const struct seq_operations queue_requeue_list_seq_ops = { + .start = queue_requeue_list_start, + .next = queue_requeue_list_next, + .stop = queue_requeue_list_stop, + .show = blk_mq_debugfs_rq_show, +}; + static int blk_flags_show(struct seq_file *m, const unsigned long flags, const char *const *flag_name, int flag_name_count) { @@ -125,16 +183,6 @@ inval: return count; } -static void print_stat(struct seq_file *m, struct blk_rq_stat *stat) -{ - if (stat->nr_samples) { - seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu", - stat->nr_samples, stat->mean, stat->min, stat->max); - } else { - seq_puts(m, "samples=0"); - } -} - static int queue_write_hint_show(void *data, struct seq_file *m) { struct request_queue *q = data; @@ -158,22 +206,13 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf, return count; } -static int queue_poll_stat_show(void *data, struct seq_file *m) -{ - struct request_queue *q = data; - int bucket; - - for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) { - seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket)); - print_stat(m, &q->poll_stat[2*bucket]); - seq_puts(m, "\n"); - - seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket)); - print_stat(m, &q->poll_stat[2*bucket+1]); - seq_puts(m, "\n"); - } - return 0; -} +static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { + { "poll_stat", 0400, queue_poll_stat_show }, + { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, + { "state", 0600, queue_state_show, queue_state_write }, + { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store }, + { }, +}; #define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name static const char *const 
hctx_state_name[] = { @@ -327,37 +366,6 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v) } EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show); -static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos) - __acquires(&q->requeue_lock) -{ - struct request_queue *q = m->private; - - spin_lock_irq(&q->requeue_lock); - return seq_list_start(&q->requeue_list, *pos); -} - -static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos) -{ - struct request_queue *q = m->private; - - return seq_list_next(v, &q->requeue_list, pos); -} - -static void queue_requeue_list_stop(struct seq_file *m, void *v) - __releases(&q->requeue_lock) -{ - struct request_queue *q = m->private; - - spin_unlock_irq(&q->requeue_lock); -} - -static const struct seq_operations queue_requeue_list_seq_ops = { - .start = queue_requeue_list_start, - .next = queue_requeue_list_next, - .stop = queue_requeue_list_stop, - .show = blk_mq_debugfs_rq_show, -}; - static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos) __acquires(&hctx->lock) { @@ -747,14 +755,6 @@ static const struct file_operations blk_mq_debugfs_fops = { .release = blk_mq_debugfs_release, }; -static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { - {"poll_stat", 0400, queue_poll_stat_show}, - {"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops}, - {"state", 0600, queue_state_show, queue_state_write}, - {"write_hints", 0600, queue_write_hint_show, queue_write_hint_store}, - {}, -}; - static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = { {"state", 0400, hctx_state_show}, {"flags", 0400, hctx_flags_show}, -- cgit v1.2.3 From 18bc42308699522b57fd599401c03ad561f422ef Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 27 Feb 2018 16:32:14 -0800 Subject: blk-mq-debugfs: Show zone locking information When debugging the ZBC code in the mq-deadline scheduler it is very important to know which zones are locked and which zones are not locked. Hence this patch that exports the zone locking information through debugfs. 
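For context, the bitmap this file dumps, q->seq_zones_wlock, holds one bit per sequential zone. The sketch below shows the kind of bookkeeping that sits behind it; it is illustrative only: the helper names are made up, and only test_and_set_bit()/clear_bit() and the seq_zones_wlock field come from the kernel.

        /*
         * Illustrative sketch, not the actual zone-locking API: one bit in
         * q->seq_zones_wlock per sequential zone acts as a write lock.
         */
        static bool example_zone_write_trylock(struct request_queue *q,
                                               unsigned int zone_no)
        {
                /* atomic test-and-set doubles as a per-zone "trylock" */
                return !test_and_set_bit(zone_no, q->seq_zones_wlock);
        }

        static void example_zone_write_unlock(struct request_queue *q,
                                              unsigned int zone_no)
        {
                clear_bit(zone_no, q->seq_zones_wlock);
        }

Any zone number printed by the new debugfs file therefore corresponds to a zone whose bit is currently set, i.e. a write that is in flight or stuck.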
Cc: Omar Sandoval Cc: Ming Lei Cc: Hannes Reinecke Cc: Johannes Thumshirn Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 9547569aa619..bd21d5b9f65f 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -206,11 +206,27 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf, return count; } +static int queue_zone_wlock_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + unsigned int i; + + if (!q->seq_zones_wlock) + return 0; + + for (i = 0; i < blk_queue_nr_zones(q); i++) + if (test_bit(i, q->seq_zones_wlock)) + seq_printf(m, "%u\n", i); + + return 0; +} + static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, { "state", 0600, queue_state_show, queue_state_write }, { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store }, + { "zone_wlock", 0400, queue_zone_wlock_show, NULL }, { }, }; -- cgit v1.2.3 From e9a99a638800af25c7ed006c96fd1dabb99254b7 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Tue, 27 Feb 2018 16:56:42 -0800 Subject: block: clear ctx pending bit under ctx lock When we insert a request, we set the software queue pending bit while holding the software queue lock. However, we clear it outside of the lock, so it's possible that a concurrent insert could reset the bit after we clear it but before we empty the request list. Afterwards, the bit would still be set but the software queue wouldn't have any requests in it, leading us to do a spurious run in the future. This is mostly a benign/theoretical issue, but it makes the following change easier to justify. Signed-off-by: Omar Sandoval Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 16e83e6df404..9594a0e9f65b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -986,9 +986,9 @@ static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) struct blk_mq_hw_ctx *hctx = flush_data->hctx; struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; - sbitmap_clear_bit(sb, bitnr); spin_lock(&ctx->lock); list_splice_tail_init(&ctx->rq_list, flush_data->list); + sbitmap_clear_bit(sb, bitnr); spin_unlock(&ctx->lock); return true; } -- cgit v1.2.3 From 4ace53f1ed40a5cfee4bdd7614c8a8b2798227ad Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Tue, 27 Feb 2018 16:56:43 -0800 Subject: sbitmap: use test_and_set_bit_lock()/clear_bit_unlock() sbitmap_queue_get()/sbitmap_queue_clear() are used for allocating/freeing a resource, so they should provide acquire/release barrier semantics, respectively. sbitmap_get() currently contains a full barrier, which is unnecessary, so use test_and_set_bit_lock() instead of test_and_set_bit() (these are equivalent on x86_64). sbitmap_clear_bit() does not imply any barriers, which is incorrect, as accesses of the resource (e.g., request) could potentially get reordered to after the clear_bit(). Introduce sbitmap_clear_bit_unlock() and use it for sbitmap_queue_clear() (this only adds a compiler barrier on x86_64). The other existing user of sbitmap_clear_bit() (the blk-mq software queue pending map) is serialized through a spinlock and does not need this. 
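The allocate/free pairing described here maps directly onto acquire/release ordering. The following userspace C11 analogue is purely illustrative (it is not kernel code) but shows the same rule: the last access to the resource must be ordered before the bit is cleared.

        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_bool busy;        /* the allocation bit */
        static int resource;            /* data guarded by the bit */

        static bool alloc_resource(void)
        {
                /*
                 * Acquire, like test_and_set_bit_lock(): accesses to
                 * 'resource' cannot be reordered before a successful
                 * test-and-set.
                 */
                return !atomic_exchange_explicit(&busy, true,
                                                 memory_order_acquire);
        }

        static void free_resource(void)
        {
                resource = 0;           /* last access to the resource */
                /*
                 * Release, like clear_bit_unlock(): the store above cannot
                 * be reordered after the bit is cleared.
                 */
                atomic_store_explicit(&busy, false, memory_order_release);
        }

Without the release ordering, the store to 'resource' could become visible after another CPU has already re-allocated the bit, which is exactly the reordering this patch closes.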
Reported-by: Tejun Heo Acked-by: Tejun Heo Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 8 ++++++++ lib/sbitmap.c | 10 +++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 0dcc60e820de..841585f6e5f2 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -171,6 +171,8 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth); * starting from the last allocated bit. This is less efficient * than the default behavior (false). * + * This operation provides acquire barrier semantics if it succeeds. + * * Return: Non-negative allocated bit number if successful, -1 otherwise. */ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin); @@ -300,6 +302,12 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } +static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb, + unsigned int bitnr) +{ + clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); +} + static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) { return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 42b5ca0acf93..e6a9c06ec70c 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -100,7 +100,7 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth, return -1; } - if (!test_and_set_bit(nr, word)) + if (!test_and_set_bit_lock(nr, word)) break; hint = nr + 1; @@ -434,9 +434,9 @@ static void sbq_wake_up(struct sbitmap_queue *sbq) /* * Pairs with the memory barrier in set_current_state() to ensure the * proper ordering of clear_bit()/waitqueue_active() in the waker and - * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See - * the comment on waitqueue_active(). This is __after_atomic because we - * just did clear_bit() in the caller. + * test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the + * waiter. See the comment on waitqueue_active(). This is __after_atomic + * because we just did clear_bit_unlock() in the caller. */ smp_mb__after_atomic(); @@ -469,7 +469,7 @@ static void sbq_wake_up(struct sbitmap_queue *sbq) void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu) { - sbitmap_clear_bit(&sbq->sb, nr); + sbitmap_clear_bit_unlock(&sbq->sb, nr); sbq_wake_up(sbq); if (likely(!sbq->round_robin && nr < sbq->sb.depth)) *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; -- cgit v1.2.3 From 24941b90e639df2bf467531601a15d792eaa6d6b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 28 Feb 2018 09:18:57 -0700 Subject: null_blk: add 'requeue' fault attribute Similarly to the support we have for testing/faking timeouts for null_blk, this adds support for triggering a requeue condition. Considering the issues around restart we've been seeing, this should be a useful addition to the testing arsenal to ensure that we are handling requeue conditions correctly. This works for queue mode 1 (legacy request_fn based path) and 2 (blk-mq path), as there's no good way to do requeue with a bio based driver. This is similar to the timeout path. For the blk-mq path, we alternate between passing back BLK_STS_RESOURCE and manually calling blk_mq_requeue_request() in the driver. The former will hit the core requeue path, while the latter exercises the IO scheduler requeue path. 
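Both the timeout and requeue attributes lean on the kernel's generic fault-injection framework. A condensed sketch of that plumbing, with made-up names standing in for the driver's hooks:

        #include <linux/fault-inject.h>
        #include <linux/module.h>

        /* Illustrative sketch of the fault-injection pattern used here */
        static DECLARE_FAULT_ATTR(example_fail_attr);
        static char example_fail_str[80];
        module_param_string(example_fail, example_fail_str,
                            sizeof(example_fail_str), S_IRUGO);

        static bool example_should_fail(void)
        {
                if (!example_fail_str[0])
                        return false;   /* injection not configured */
                return should_fail(&example_fail_attr, 1);
        }

        static int __init example_init(void)
        {
                if (example_fail_str[0] &&
                    !setup_fault_attr(&example_fail_attr, example_fail_str))
                        return -EINVAL;
                return 0;
        }
        module_init(example_init);

Assuming the framework's usual "<interval>,<probability>,<space>,<times>" string format, loading the module with something like requeue="1,100,0,-1" should then force every request down one of the two requeue paths.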
Reviewed-by: Bart Van Assche Reviewed-by: Omar Sandoval Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 67 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 55 insertions(+), 12 deletions(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 287a09611c0f..d12d7a8325ad 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -29,6 +29,7 @@ #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static DECLARE_FAULT_ATTR(null_timeout_attr); +static DECLARE_FAULT_ATTR(null_requeue_attr); #endif static inline u64 mb_per_tick(int mbps) @@ -53,6 +54,7 @@ struct nullb_queue { wait_queue_head_t wait; unsigned int queue_depth; struct nullb_device *dev; + unsigned int requeue_selection; struct nullb_cmd *cmds; }; @@ -170,6 +172,9 @@ MODULE_PARM_DESC(home_node, "Home node for the device"); #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static char g_timeout_str[80]; module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO); + +static char g_requeue_str[80]; +module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), S_IRUGO); #endif static int g_queue_mode = NULL_Q_MQ; @@ -1380,7 +1385,15 @@ static bool should_timeout_request(struct request *rq) if (g_timeout_str[0]) return should_fail(&null_timeout_attr, 1); #endif + return false; +} +static bool should_requeue_request(struct request *rq) +{ +#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION + if (g_requeue_str[0]) + return should_fail(&null_requeue_attr, 1); +#endif return false; } @@ -1391,11 +1404,17 @@ static void null_request_fn(struct request_queue *q) while ((rq = blk_fetch_request(q)) != NULL) { struct nullb_cmd *cmd = rq->special; - if (!should_timeout_request(rq)) { - spin_unlock_irq(q->queue_lock); - null_handle_cmd(cmd); - spin_lock_irq(q->queue_lock); + /* just ignore the request */ + if (should_timeout_request(rq)) + continue; + if (should_requeue_request(rq)) { + blk_requeue_request(q, rq); + continue; } + + spin_unlock_irq(q->queue_lock); + null_handle_cmd(cmd); + spin_lock_irq(q->queue_lock); } } @@ -1422,10 +1441,23 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(bd->rq); - if (!should_timeout_request(bd->rq)) - return null_handle_cmd(cmd); + if (should_requeue_request(bd->rq)) { + /* + * Alternate between hitting the core BUSY path, and the + * driver driven requeue path + */ + nq->requeue_selection++; + if (nq->requeue_selection & 1) + return BLK_STS_RESOURCE; + else { + blk_mq_requeue_request(bd->rq, true); + return BLK_STS_OK; + } + } + if (should_timeout_request(bd->rq)) + return BLK_STS_OK; - return BLK_STS_OK; + return null_handle_cmd(cmd); } static const struct blk_mq_ops null_mq_ops = { @@ -1659,16 +1691,27 @@ static void null_validate_conf(struct nullb_device *dev) dev->mbps = 0; } -static bool null_setup_fault(void) -{ #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION - if (!g_timeout_str[0]) +static bool __null_setup_fault(struct fault_attr *attr, char *str) +{ + if (!str[0]) return true; - if (!setup_fault_attr(&null_timeout_attr, g_timeout_str)) + if (!setup_fault_attr(attr, str)) return false; - null_timeout_attr.verbose = 0; + attr->verbose = 0; + return true; +} +#endif + +static bool null_setup_fault(void) +{ +#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION + if (!__null_setup_fault(&null_timeout_attr, g_timeout_str)) + return false; + if (!__null_setup_fault(&null_requeue_attr, g_requeue_str)) + return false; #endif return true; } -- cgit v1.2.3 From 0fa8ebdd4244b8e652cc5341c3d5b4b06f84a637 Mon Sep 17 
00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:28 -0800 Subject: block/loop: Delete gendisk before cleaning up the request queue Remove the disk, partition and bdi sysfs attributes before cleaning up the request queue associated with the disk. Signed-off-by: Bart Van Assche Reviewed-by: Johannes Thumshirn Reviewed-by: Joseph Qi Reviewed-by: Ming Lei Cc: Josef Bacik Cc: Shaohua Li Cc: Omar Sandoval Cc: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/block/loop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 87855b5123a6..9d29aa6413e5 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1864,8 +1864,8 @@ out: static void loop_remove(struct loop_device *lo) { - blk_cleanup_queue(lo->lo_queue); del_gendisk(lo->lo_disk); + blk_cleanup_queue(lo->lo_queue); blk_mq_free_tag_set(&lo->tag_set); put_disk(lo->lo_disk); kfree(lo); -- cgit v1.2.3 From d8115c35bf3ee575cfc9c51ac9853f58a21a43dc Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:29 -0800 Subject: md: Delete gendisk before cleaning up the request queue Remove the disk, partition and bdi sysfs attributes before cleaning up the request queue associated with the disk. Signed-off-by: Bart Van Assche Reviewed-by: Johannes Thumshirn Reviewed-by: Joseph Qi Reviewed-by: Ming Lei Cc: Shaohua Li Signed-off-by: Jens Axboe --- drivers/md/md.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index bc67ab6844f0..eba7fa2f0abb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5203,12 +5203,12 @@ static void md_free(struct kobject *ko) if (mddev->sysfs_state) sysfs_put(mddev->sysfs_state); + if (mddev->gendisk) + del_gendisk(mddev->gendisk); if (mddev->queue) blk_cleanup_queue(mddev->queue); - if (mddev->gendisk) { - del_gendisk(mddev->gendisk); + if (mddev->gendisk) put_disk(mddev->gendisk); - } percpu_ref_exit(&mddev->writes_pending); kfree(mddev); -- cgit v1.2.3 From 392db38058eb47250a9d0cc737af37e78a7e443d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:30 -0800 Subject: zram: Delete gendisk before cleaning up the request queue Remove the disk, partition and bdi sysfs attributes before cleaning up the request queue associated with the disk. Signed-off-by: Bart Van Assche Reviewed-by: Johannes Thumshirn Reviewed-by: Joseph Qi Reviewed-by: Ming Lei Cc: Minchan Kim Cc: Nitin Gupta Cc: Sergey Senozhatsky Signed-off-by: Jens Axboe --- drivers/block/zram/zram_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 0afa6c8c3857..85110e7931e5 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1620,8 +1620,8 @@ static int zram_remove(struct zram *zram) pr_info("Removed device: %s\n", zram->disk->disk_name); - blk_cleanup_queue(zram->disk->queue); del_gendisk(zram->disk); + blk_cleanup_queue(zram->disk->queue); put_disk(zram->disk); kfree(zram); return 0; -- cgit v1.2.3 From 5ee0524ba137fe928a88b440d014e3c8451fb32c Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:31 -0800 Subject: block: Add 'lock' as third argument to blk_alloc_queue_node() This patch does not change any functionality. 
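All existing callers pass NULL and keep the internal lock; the new argument exists so that a legacy driver can hand over its own lock at allocation time. A sketch of such a call site ('struct mydev' is illustrative; the umem and drbd conversions later in this series follow this shape):

        struct mydev {
                spinlock_t lock;                /* driver's queue lock */
                struct request_queue *queue;
        };

        static int mydev_init_queue(struct mydev *dev)
        {
                /* the lock must be live before the queue can point at it */
                spin_lock_init(&dev->lock);
                dev->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE,
                                                  &dev->lock);
                return dev->queue ? 0 : -ENOMEM;
        }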
Signed-off-by: Bart Van Assche Reviewed-by: Joseph Qi Cc: Christoph Hellwig Cc: Philipp Reisner Cc: Ulf Hansson Cc: Kees Cook Signed-off-by: Jens Axboe --- block/blk-core.c | 7 ++++--- block/blk-mq.c | 2 +- drivers/block/null_blk.c | 3 ++- drivers/ide/ide-probe.c | 2 +- drivers/lightnvm/core.c | 2 +- drivers/md/dm.c | 2 +- drivers/nvdimm/pmem.c | 2 +- drivers/nvme/host/multipath.c | 2 +- drivers/scsi/scsi_lib.c | 2 +- include/linux/blkdev.h | 3 ++- 10 files changed, 15 insertions(+), 12 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 2d1a7bbe0634..e873a24bf82d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -810,7 +810,7 @@ void blk_exit_rl(struct request_queue *q, struct request_list *rl) struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { - return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); + return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL); } EXPORT_SYMBOL(blk_alloc_queue); @@ -888,7 +888,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t) kblockd_schedule_work(&q->timeout_work); } -struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + spinlock_t *lock) { struct request_queue *q; @@ -1030,7 +1031,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) { struct request_queue *q; - q = blk_alloc_queue_node(GFP_KERNEL, node_id); + q = blk_alloc_queue_node(GFP_KERNEL, node_id, NULL); if (!q) return NULL; diff --git a/block/blk-mq.c b/block/blk-mq.c index 9594a0e9f65b..75336848f7a7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2556,7 +2556,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct request_queue *uninit_q, *q; - uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); + uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL); if (!uninit_q) return ERR_PTR(-ENOMEM); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index d12d7a8325ad..6dc7e7cfca4a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1760,7 +1760,8 @@ static int null_add_dev(struct nullb_device *dev) } null_init_queues(nullb); } else if (dev->queue_mode == NULL_Q_BIO) { - nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); + nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node, + NULL); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index caa20eb5f26b..d6b8c7e1545d 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -766,7 +766,7 @@ static int ide_init_queue(ide_drive_t *drive) * limits and LBA48 we could raise it but as yet * do not. 
*/ - q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif)); + q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif), NULL); if (!q) return 1; diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index dcc9e621e651..5f1988df1593 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -384,7 +384,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) goto err_dev; } - tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); + tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL); if (!tqueue) { ret = -ENOMEM; goto err_disk; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68136806d365..7586d249266c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1841,7 +1841,7 @@ static struct mapped_device *alloc_dev(int minor) INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); - md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); + md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL); if (!md->queue) goto bad; md->queue->queuedata = md; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 10041ac4032c..cfb15ac50925 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -344,7 +344,7 @@ static int pmem_attach_disk(struct device *dev, return -EBUSY; } - q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev)); + q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL); if (!q) return -ENOMEM; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index b7e5c6db4d92..88440562a197 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -162,7 +162,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath) return 0; - q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); + q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL); if (!q) goto out; q->queuedata = head; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a86df9ca7d1c..71d1135f94d0 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2223,7 +2223,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev) struct Scsi_Host *shost = sdev->host; struct request_queue *q; - q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); + q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL); if (!q) return NULL; q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ed63f3b69c12..667a9b0053d9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1321,7 +1321,8 @@ extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(gfp_t); -struct request_queue *blk_alloc_queue_node(gfp_t, int); +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + spinlock_t *lock); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); -- cgit v1.2.3 From 498f6650aec864e331cae7575fec5f07781d0bf3 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:32 -0800 Subject: block: Fix a race between the cgroup code and request queue initialization Initialize the request queue lock earlier such that the following race can no longer occur: blk_init_queue_node() blkcg_print_blkgs() blk_alloc_queue_node (1) q->queue_lock = &q->__queue_lock (2) blkcg_init_queue(q) (3) spin_lock_irq(blkg->q->queue_lock) (4) q->queue_lock = lock (5) 
spin_unlock_irq(blkg->q->queue_lock) (6) (1) allocate an uninitialized queue; (2) initialize queue_lock to its default internal lock; (3) initialize blkcg part of request queue, which will create blkg and then insert it into blkg_list; (4) traverse blkg_list and find the created blkg, and then take its queue lock, here it is the default *internal lock*; (5) *race window*, now queue_lock is overridden with *driver specified lock*; (6) now unlock *driver specified lock*, not the locked *internal lock*, unlock balance breaks. The changes in this patch are as follows: - Move the .queue_lock initialization from blk_init_queue_node() into blk_alloc_queue_node(). - Only override the .queue_lock pointer for legacy queues because it is not useful for blk-mq queues to override this pointer. - For all block drivers that initialize .queue_lock explicitly, change the blk_alloc_queue() call in the driver into a blk_alloc_queue_node() call and remove the explicit .queue_lock initialization. Additionally, initialize the spin lock that will be used as queue lock earlier if necessary. Reported-by: Joseph Qi Signed-off-by: Bart Van Assche Reviewed-by: Joseph Qi Cc: Christoph Hellwig Cc: Philipp Reisner Cc: Ulf Hansson Cc: Kees Cook Signed-off-by: Jens Axboe --- block/blk-core.c | 24 ++++++++++++++++-------- drivers/block/drbd/drbd_main.c | 3 +-- drivers/block/umem.c | 7 +++---- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index e873a24bf82d..41c74b37be85 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -888,6 +888,19 @@ static void blk_rq_timed_out_timer(struct timer_list *t) kblockd_schedule_work(&q->timeout_work); } +/** + * blk_alloc_queue_node - allocate a request queue + * @gfp_mask: memory allocation flags + * @node_id: NUMA node to allocate memory from + * @lock: For legacy queues, pointer to a spinlock that will be used to e.g. + * serialize calls to the legacy .request_fn() callback. Ignored for + * blk-mq request queues. + * + * Note: pass the queue lock as the third argument to this function instead of + * setting the queue lock pointer explicitly to avoid triggering a sporadic + * crash in the blkcg code. This function namely calls blkcg_init_queue() and + * the queue lock pointer must be set before blkcg_init_queue() is called. + */ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, spinlock_t *lock) { @@ -940,11 +953,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, mutex_init(&q->sysfs_lock); spin_lock_init(&q->__queue_lock); - /* - * By default initialize queue_lock to internal lock and driver can - * override it later if need be. - */ - q->queue_lock = &q->__queue_lock; + if (!q->mq_ops) + q->queue_lock = lock ? 
: &q->__queue_lock; /* * A queue starts its life with bypass turned on to avoid @@ -1031,13 +1041,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) { struct request_queue *q; - q = blk_alloc_queue_node(GFP_KERNEL, node_id, NULL); + q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock); if (!q) return NULL; q->request_fn = rfn; - if (lock) - q->queue_lock = lock; if (blk_init_allocated_queue(q) < 0) { blk_cleanup_queue(q); return NULL; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 0a0394aa1b9c..185f1ef00a7c 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2816,7 +2816,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig drbd_init_set_defaults(device); - q = blk_alloc_queue(GFP_KERNEL); + q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock); if (!q) goto out_no_q; device->rq_queue = q; @@ -2848,7 +2848,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig /* Setting the max_hw_sectors to an odd value of 8kibyte here This triggers a max_bio_size message upon first attach or connect */ blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); - q->queue_lock = &resource->req_lock; device->md_io.page = alloc_page(GFP_KERNEL); if (!device->md_io.page) diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 8077123678ad..5c7fb8cc4149 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -888,13 +888,14 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) card->Active = -1; /* no page is active */ card->bio = NULL; card->biotail = &card->bio; + spin_lock_init(&card->lock); - card->queue = blk_alloc_queue(GFP_KERNEL); + card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, + &card->lock); if (!card->queue) goto failed_alloc; blk_queue_make_request(card->queue, mm_make_request); - card->queue->queue_lock = &card->lock; card->queue->queuedata = card; tasklet_init(&card->tasklet, process_page, (unsigned long)card); @@ -968,8 +969,6 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_printk(KERN_INFO, &card->dev->dev, "Window size %d bytes, IRQ %d\n", data, dev->irq); - spin_lock_init(&card->lock); - pci_set_drvdata(dev, card); if (pci_write_cmd != 0x0F) /* If not Memory Write & Invalidate */ -- cgit v1.2.3 From a063057d7c731cffa7d10740e8ebc2970df8dbb3 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:33 -0800 Subject: block: Fix a race between request queue removal and the block cgroup controller Avoid that the following race can occur: blk_cleanup_queue() blkcg_print_blkgs() spin_lock_irq(lock) (1) spin_lock_irq(blkg->q->queue_lock) (2,5) q->queue_lock = &q->__queue_lock (3) spin_unlock_irq(lock) (4) spin_unlock_irq(blkg->q->queue_lock) (6) (1) take driver lock; (2) busy loop for driver lock; (3) override driver lock with internal lock; (4) unlock driver lock; (5) can take driver lock now; (6) but unlock internal lock. This change is safe because only the SCSI core and the NVME core keep a reference on a request queue after having called blk_cleanup_queue(). Neither driver accesses any of the removed data structures between its blk_cleanup_queue() and blk_put_queue() calls. 
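Combined with the loop/md/zram reorderings earlier in this series, driver teardown settles into the following shape (sketch only; 'struct mydev' is illustrative):

        static void mydev_remove(struct mydev *dev)
        {
                del_gendisk(dev->disk);         /* drop disk/partition/bdi sysfs */
                blk_cleanup_queue(dev->queue);  /* exits elevator and blkcg here */
                put_disk(dev->disk);            /* release the gendisk reference */
        }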
Reported-by: Joseph Qi Signed-off-by: Bart Van Assche Reviewed-by: Joseph Qi Cc: Jan Kara Signed-off-by: Jens Axboe --- block/blk-core.c | 31 +++++++++++++++++++++++++++++++ block/blk-sysfs.c | 7 ------- 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 41c74b37be85..6febc69a58aa 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -719,6 +719,37 @@ void blk_cleanup_queue(struct request_queue *q) del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer); blk_sync_queue(q); + /* + * I/O scheduler exit is only safe after the sysfs scheduler attribute + * has been removed. + */ + WARN_ON_ONCE(q->kobj.state_in_sysfs); + + /* + * Since the I/O scheduler exit code may access cgroup information, + * perform I/O scheduler exit before disassociating from the block + * cgroup controller. + */ + if (q->elevator) { + ioc_clear_queue(q); + elevator_exit(q, q->elevator); + q->elevator = NULL; + } + + /* + * Remove all references to @q from the block cgroup controller before + * restoring @q->queue_lock to avoid that restoring this pointer causes + * e.g. blkcg_print_blkgs() to crash. + */ + blkcg_exit_queue(q); + + /* + * Since the cgroup code may dereference the @q->backing_dev_info + * pointer, only decrease its reference count after having removed the + * association with the block cgroup controller. + */ + bdi_put(q->backing_dev_info); + if (q->mq_ops) blk_mq_free_queue(q); percpu_ref_exit(&q->q_usage_counter); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index cbea895a5547..fd71a00c9462 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -798,13 +798,6 @@ static void __blk_release_queue(struct work_struct *work) if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) blk_stat_remove_callback(q, q->poll_cb); blk_stat_free_callback(q->poll_cb); - bdi_put(q->backing_dev_info); - blkcg_exit_queue(q); - - if (q->elevator) { - ioc_clear_queue(q); - elevator_exit(q, q->elevator); - } blk_free_queue_stats(q->stats); -- cgit v1.2.3 From f16ee7c7ec0fa5f0322bd64d5ee183a28ed1ec08 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 1 Mar 2018 11:31:28 +0100 Subject: misc: rtsx: rename SG_END macro A change to the generic scatterlist code caused a conflict with the rtsx card reader driver: In file included from drivers/misc/cardreader/rtsx_pcr.c:32: include/linux/rtsx_pci.h:40: error: "SG_END" redefined [-Werror] This renames SG_END and the related SG_* descriptor constants in this driver with an RTSX_SG_ prefix to resolve the conflict. 
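The clash in miniature (the values match the patches; the juxtaposition is illustrative, not taken from one real file):

        /* generic <linux/scatterlist.h>, after the SG_CHAIN/SG_END patch: */
        #define SG_END          0x02UL  /* scatterlist terminator bit */

        /*
         * The driver header used to define "#define SG_END 0x02" as a DMA
         * descriptor flag; with both headers included, -Werror rejects the
         * redefinition. Namespacing the driver copy resolves it:
         */
        #define RTSX_SG_END     0x02    /* DMA descriptor "end" flag */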
Fixes: 723fbf563a6a ("lib/scatterlist: Add SG_CHAIN and SG_END macros for LSB encodings") Cc: Anshuman Khandual Signed-off-by: Arnd Bergmann Signed-off-by: Jens Axboe --- drivers/misc/cardreader/rtsx_pcr.c | 4 ++-- include/linux/rtsx_pci.h | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index fd09b0960097..e8f1d4bb806a 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -444,12 +444,12 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr, { u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi; u64 val; - u8 option = SG_VALID | SG_TRANS_DATA; + u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA; pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len); if (end) - option |= SG_END; + option |= RTSX_SG_END; val = ((u64)addr << 32) | ((u64)len << 12) | option; put_unaligned_le64(val, ptr); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 478acf6efac6..e964bbd03fc2 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -36,12 +36,12 @@ #define CHECK_REG_CMD 2 #define RTSX_HDBAR 0x08 -#define SG_INT 0x04 -#define SG_END 0x02 -#define SG_VALID 0x01 -#define SG_NO_OP 0x00 -#define SG_TRANS_DATA (0x02 << 4) -#define SG_LINK_DESC (0x03 << 4) +#define RTSX_SG_INT 0x04 +#define RTSX_SG_END 0x02 +#define RTSX_SG_VALID 0x01 +#define RTSX_SG_NO_OP 0x00 +#define RTSX_SG_TRANS_DATA (0x02 << 4) +#define RTSX_SG_LINK_DESC (0x03 << 4) #define RTSX_HDBCTLR 0x0C #define SDMA_MODE 0x00 #define ADMA_MODE (0x02 << 26) -- cgit v1.2.3 From b5d013bc09e9e76df9cdc85c2598486ba9a5b9b6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 1 Mar 2018 11:31:29 +0100 Subject: staging: rts5208: rename SG_END macro A change to the generic scatterlist code caused a conflict with the rtsx card reader driver: In file included from drivers/staging/rts5208/rtsx.h:180, from drivers/staging/rts5208/rtsx.c:28: drivers/staging/rts5208/rtsx_chip.h:343: error: "SG_END" redefined [-Werror] This renames SG_END and the related SG_* descriptor constants in this driver with an RTSX_SG_ prefix to resolve the conflict. 
Fixes: 723fbf563a6a ("lib/scatterlist: Add SG_CHAIN and SG_END macros for LSB encodings") Cc: Anshuman Khandual Reviewed-by: Andy Shevchenko Signed-off-by: Arnd Bergmann Signed-off-by: Jens Axboe --- drivers/staging/rts5208/rtsx_chip.h | 12 ++++++------ drivers/staging/rts5208/rtsx_transport.c | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h index 4f6e3c1c4621..8a8cd5d3cf7e 100644 --- a/drivers/staging/rts5208/rtsx_chip.h +++ b/drivers/staging/rts5208/rtsx_chip.h @@ -339,13 +339,13 @@ struct sense_data_t { #define CHK_BIT(data, idx) ((data) & (1 << (idx))) /* SG descriptor */ -#define SG_INT 0x04 -#define SG_END 0x02 -#define SG_VALID 0x01 +#define RTSX_SG_INT 0x04 +#define RTSX_SG_END 0x02 +#define RTSX_SG_VALID 0x01 -#define SG_NO_OP 0x00 -#define SG_TRANS_DATA (0x02 << 4) -#define SG_LINK_DESC (0x03 << 4) +#define RTSX_SG_NO_OP 0x00 +#define RTSX_SG_TRANS_DATA (0x02 << 4) +#define RTSX_SG_LINK_DESC (0x03 << 4) struct rtsx_chip; diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c index 8b57e17ee6d3..716cce2bd7f0 100644 --- a/drivers/staging/rts5208/rtsx_transport.c +++ b/drivers/staging/rts5208/rtsx_transport.c @@ -308,7 +308,7 @@ static inline void rtsx_add_sg_tbl( do { if (len > 0x80000) { temp_len = 0x80000; - temp_opt = option & (~SG_END); + temp_opt = option & (~RTSX_SG_END); } else { temp_len = len; temp_opt = option; } @@ -407,9 +407,9 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card, *index = *index + 1; } if ((i == (sg_cnt - 1)) || !resid) - option = SG_VALID | SG_END | SG_TRANS_DATA; + option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA; else - option = SG_VALID | SG_TRANS_DATA; + option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA; rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option); @@ -555,9 +555,9 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card, (unsigned int)addr, len); if (j == (sg_cnt - 1)) - option = SG_VALID | SG_END | SG_TRANS_DATA; + option = RTSX_SG_VALID | RTSX_SG_END | RTSX_SG_TRANS_DATA; else - option = SG_VALID | SG_TRANS_DATA; + option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA; rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option); -- cgit v1.2.3 From 66231ad3e2886ba99fbf440cea44cab547e5163f Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 6 Mar 2018 12:07:13 +0800 Subject: block: null_blk: fix 'Invalid parameters' when loading module On ARM64, the default page size has been 64K on some distributions, and we should allow ARM64 people to play with null_blk. This patch fixes the issue by extending the page bitmap size to support non-4KB PAGE_SIZE configurations. Cc: Bart Van Assche Cc: Shaohua Li Cc: Kyungchan Koh Cc: weiping zhang Cc: Yi Zhang Reported-by: Yi Zhang Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 46 +++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 6dc7e7cfca4a..d6be7a6d8ca6 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -74,6 +74,7 @@ enum nullb_device_flags { NULLB_DEV_FL_CACHE = 3, }; +#define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2) /* * nullb_page is a page in memory for nullb devices. 
* @@ -88,10 +89,10 @@ enum nullb_device_flags { */ struct nullb_page { struct page *page; - unsigned long bitmap; + DECLARE_BITMAP(bitmap, MAP_SZ); }; -#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1) -#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2) +#define NULLB_PAGE_LOCK (MAP_SZ - 1) +#define NULLB_PAGE_FREE (MAP_SZ - 2) struct nullb_device { struct nullb *nullb; @@ -733,7 +734,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags) if (!t_page->page) goto out_freepage; - t_page->bitmap = 0; + memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); return t_page; out_freepage: kfree(t_page); @@ -743,13 +744,20 @@ out: static void null_free_page(struct nullb_page *t_page) { - __set_bit(NULLB_PAGE_FREE, &t_page->bitmap); - if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap)) + __set_bit(NULLB_PAGE_FREE, t_page->bitmap); + if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) return; __free_page(t_page->page); kfree(t_page); } +static bool null_page_empty(struct nullb_page *page) +{ + int size = MAP_SZ - 2; + + return find_first_bit(page->bitmap, size) == size; +} + static void null_free_sector(struct nullb *nullb, sector_t sector, bool is_cache) { @@ -764,9 +772,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector, t_page = radix_tree_lookup(root, idx); if (t_page) { - __clear_bit(sector_bit, &t_page->bitmap); + __clear_bit(sector_bit, t_page->bitmap); - if (!t_page->bitmap) { + if (null_page_empty(t_page)) { ret = radix_tree_delete_item(root, idx, t_page); WARN_ON(ret != t_page); null_free_page(ret); @@ -837,7 +845,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb, t_page = radix_tree_lookup(root, idx); WARN_ON(t_page && t_page->page->index != idx); - if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap))) + if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) return t_page; return NULL; @@ -900,10 +908,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); - __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap); - if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) { + __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); + if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { null_free_page(c_page); - if (t_page && t_page->bitmap == 0) { + if (t_page && null_page_empty(t_page)) { ret = radix_tree_delete_item(&nullb->dev->data, idx, t_page); null_free_page(t_page); @@ -919,11 +927,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) for (i = 0; i < PAGE_SECTORS; i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { - if (test_bit(i, &c_page->bitmap)) { + if (test_bit(i, c_page->bitmap)) { offset = (i << SECTOR_SHIFT); memcpy(dst + offset, src + offset, nullb->dev->blocksize); - __set_bit(i, &t_page->bitmap); + __set_bit(i, t_page->bitmap); } } @@ -960,10 +968,10 @@ again: * We found the page which is being flushed to disk by other * threads */ - if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap)) + if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) c_pages[i] = NULL; else - __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap); + __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); } one_round = 0; @@ -1016,7 +1024,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source, kunmap_atomic(dst); kunmap_atomic(src); - __set_bit(sector & SECTOR_MASK, &t_page->bitmap); + __set_bit(sector & SECTOR_MASK, t_page->bitmap); if (is_fua) null_free_sector(nullb, sector, true); @@ -1846,10 +1854,6 @@ static int __init 
null_init(void) struct nullb *nullb; struct nullb_device *dev; - /* check for nullb_page.bitmap */ - if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT)) - return -EINVAL; - if (g_bs > PAGE_SIZE) { pr_warn("null_blk: invalid block size\n"); pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE); -- cgit v1.2.3 From d15e1175a9a9af967ae01245f55196acb5d5ff0f Mon Sep 17 00:00:00 2001 From: Jonas Rabenstein Date: Thu, 1 Mar 2018 14:26:37 +0100 Subject: block: sed-opal: fix response string extraction Tokens are prefixed by a variable number of length bytes. If a bytestring is not stored in a tiny or short atom, more than one byte has to be skipped so that the returned data is not prefixed by the bytes that describe the string's length. Acked-by: Jonathan Derrick Signed-off-by: Jonas Rabenstein Signed-off-by: Jens Axboe --- block/sed-opal.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/block/sed-opal.c b/block/sed-opal.c index 9ed51d0c6b1d..36842bfa572e 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -871,6 +871,9 @@ static int response_parse(const u8 *buf, size_t length, static size_t response_get_string(const struct parsed_resp *resp, int n, const char **store) { + u8 skip; + const struct opal_resp_tok *token; + *store = NULL; if (!resp) { pr_debug("Response is NULL\n"); return 0; } @@ -883,13 +886,30 @@ static size_t response_get_string(const struct parsed_resp *resp, int n, return 0; } - if (resp->toks[n].type != OPAL_DTA_TOKENID_BYTESTRING) { + token = &resp->toks[n]; + if (token->type != OPAL_DTA_TOKENID_BYTESTRING) { pr_debug("Token is not a byte string!\n"); return 0; } - *store = resp->toks[n].pos + 1; - return resp->toks[n].len - 1; + switch (token->width) { + case OPAL_WIDTH_TINY: + case OPAL_WIDTH_SHORT: + skip = 1; + break; + case OPAL_WIDTH_MEDIUM: + skip = 2; + break; + case OPAL_WIDTH_LONG: + skip = 4; + break; + default: + pr_debug("Token has invalid width!\n"); + return 0; + } + + *store = token->pos + skip; + return token->len - skip; } static u64 response_get_u64(const struct parsed_resp *resp, int n) -- cgit v1.2.3 From 66f91322f39cd18a01524264464c2ff4c98c936e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:02 -0800 Subject: block: Reorder the queue flag manipulation function definitions Move the definition of queue_flag_clear_unlocked() up and move the definition of queue_in_flight() down such that all queue flag manipulation function definitions become contiguous. This patch does not change any functionality. Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. 
Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 667a9b0053d9..c351aaec3ca7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -726,6 +726,12 @@ static inline void queue_flag_set_unlocked(unsigned int flag, __set_bit(flag, &q->queue_flags); } +static inline void queue_flag_clear_unlocked(unsigned int flag, + struct request_queue *q) +{ + __clear_bit(flag, &q->queue_flags); +} + static inline int queue_flag_test_and_clear(unsigned int flag, struct request_queue *q) { @@ -758,17 +764,6 @@ static inline void queue_flag_set(unsigned int flag, struct request_queue *q) __set_bit(flag, &q->queue_flags); } -static inline void queue_flag_clear_unlocked(unsigned int flag, - struct request_queue *q) -{ - __clear_bit(flag, &q->queue_flags); -} - -static inline int queue_in_flight(struct request_queue *q) -{ - return q->in_flight[0] + q->in_flight[1]; -} - static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) { queue_lockdep_assert_held(q); @@ -804,6 +799,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) extern int blk_set_preempt_only(struct request_queue *q); extern void blk_clear_preempt_only(struct request_queue *q); +static inline int queue_in_flight(struct request_queue *q) +{ + return q->in_flight[0] + q->in_flight[1]; +} + static inline bool blk_account_rq(struct request *rq) { return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); -- cgit v1.2.3 From f78bac2c8e69144781e271d9771bae8dbb4e7098 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:03 -0800 Subject: block: Use the queue_flag_*() functions instead of open-coding these Except for changing the atomic queue flag manipulations that are protected by the queue lock into non-atomic manipulations, this patch does not change any functionality. Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq.c | 2 +- block/blk-settings.c | 4 ++-- block/blk-stat.c | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 6febc69a58aa..241b73088617 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -994,7 +994,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, * registered by blk_register_queue(). 
*/ q->bypass_depth = 1; - __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); + queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); init_waitqueue_head(&q->mq_freeze_wq); diff --git a/block/blk-mq.c b/block/blk-mq.c index 75336848f7a7..e70cc7d48f58 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2678,7 +2678,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; if (!(set->flags & BLK_MQ_F_SG_MERGE)) - q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; + queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); q->sg_reserved_size = INT_MAX; diff --git a/block/blk-settings.c b/block/blk-settings.c index 48ebe6be07b7..7f719da0eadd 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -861,9 +861,9 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable) { spin_lock_irq(q->queue_lock); if (queueable) - clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); + queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q); else - set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q); spin_unlock_irq(q->queue_lock); } EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); diff --git a/block/blk-stat.c b/block/blk-stat.c index 28003bf9941c..b664aa6df725 100644 --- a/block/blk-stat.c +++ b/block/blk-stat.c @@ -152,7 +152,7 @@ void blk_stat_add_callback(struct request_queue *q, spin_lock(&q->stats->lock); list_add_tail_rcu(&cb->list, &q->stats->callbacks); - set_bit(QUEUE_FLAG_STATS, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_STATS, q); spin_unlock(&q->stats->lock); } EXPORT_SYMBOL_GPL(blk_stat_add_callback); @@ -163,7 +163,7 @@ void blk_stat_remove_callback(struct request_queue *q, spin_lock(&q->stats->lock); list_del_rcu(&cb->list); if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting) - clear_bit(QUEUE_FLAG_STATS, &q->queue_flags); + queue_flag_clear(QUEUE_FLAG_STATS, q); spin_unlock(&q->stats->lock); del_timer_sync(&cb->timer); @@ -191,7 +191,7 @@ void blk_stat_enable_accounting(struct request_queue *q) { spin_lock(&q->stats->lock); q->stats->enable_accounting = true; - set_bit(QUEUE_FLAG_STATS, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_STATS, q); spin_unlock(&q->stats->lock); } -- cgit v1.2.3 From 8814ce8a0f680599a837af18aefdec774e5c7b97 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:04 -0800 Subject: block: Introduce blk_queue_flag_{set,clear,test_and_{set,clear}}() Introduce functions that modify the queue flags and that protect these modifications with the request queue lock. Except for moving one wake_up_all() call from inside to outside a critical section, this patch does not change any functionality. Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. 
Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-core.c | 91 +++++++++++++++++++++++++++++++++++++++++--------- block/blk-mq.c | 12 ++----- block/blk-settings.c | 6 ++-- block/blk-sysfs.c | 22 ++++-------- block/blk-timeout.c | 6 ++-- include/linux/blkdev.h | 5 +++ 6 files changed, 93 insertions(+), 49 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 241b73088617..74c6283f4509 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -71,6 +71,78 @@ struct kmem_cache *blk_requestq_cachep; */ static struct workqueue_struct *kblockd_workqueue; +/** + * blk_queue_flag_set - atomically set a queue flag + * @flag: flag to be set + * @q: request queue + */ +void blk_queue_flag_set(unsigned int flag, struct request_queue *q) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + queue_flag_set(flag, q); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(blk_queue_flag_set); + +/** + * blk_queue_flag_clear - atomically clear a queue flag + * @flag: flag to be cleared + * @q: request queue + */ +void blk_queue_flag_clear(unsigned int flag, struct request_queue *q) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + queue_flag_clear(flag, q); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(blk_queue_flag_clear); + +/** + * blk_queue_flag_test_and_set - atomically test and set a queue flag + * @flag: flag to be set + * @q: request queue + * + * Returns the previous value of @flag - 0 if the flag was not set and 1 if + * the flag was already set. + */ +bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q) +{ + unsigned long flags; + bool res; + + spin_lock_irqsave(q->queue_lock, flags); + res = queue_flag_test_and_set(flag, q); + spin_unlock_irqrestore(q->queue_lock, flags); + + return res; +} +EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set); + +/** + * blk_queue_flag_test_and_clear - atomically test and clear a queue flag + * @flag: flag to be cleared + * @q: request queue + * + * Returns the previous value of @flag - 0 if the flag was not set and 1 if + * the flag was set. 
+ */ +bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q) +{ + unsigned long flags; + bool res; + + spin_lock_irqsave(q->queue_lock, flags); + res = queue_flag_test_and_clear(flag, q); + spin_unlock_irqrestore(q->queue_lock, flags); + + return res; +} +EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear); + static void blk_clear_congested(struct request_list *rl, int sync) { #ifdef CONFIG_CGROUP_WRITEBACK @@ -361,25 +433,14 @@ EXPORT_SYMBOL(blk_sync_queue); */ int blk_set_preempt_only(struct request_queue *q) { - unsigned long flags; - int res; - - spin_lock_irqsave(q->queue_lock, flags); - res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q); - spin_unlock_irqrestore(q->queue_lock, flags); - - return res; + return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q); } EXPORT_SYMBOL_GPL(blk_set_preempt_only); void blk_clear_preempt_only(struct request_queue *q) { - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q); + blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q); wake_up_all(&q->mq_freeze_wq); - spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL_GPL(blk_clear_preempt_only); @@ -629,9 +690,7 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end); void blk_set_queue_dying(struct request_queue *q) { - spin_lock_irq(q->queue_lock); - queue_flag_set(QUEUE_FLAG_DYING, q); - spin_unlock_irq(q->queue_lock); + blk_queue_flag_set(QUEUE_FLAG_DYING, q); /* * When queue DYING flag is set, we need to block new req diff --git a/block/blk-mq.c b/block/blk-mq.c index e70cc7d48f58..a86899022683 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -194,11 +194,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); */ void blk_mq_quiesce_queue_nowait(struct request_queue *q) { - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - queue_flag_set(QUEUE_FLAG_QUIESCED, q); - spin_unlock_irqrestore(q->queue_lock, flags); + blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); } EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); @@ -239,11 +235,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); */ void blk_mq_unquiesce_queue(struct request_queue *q) { - unsigned long flags; - - spin_lock_irqsave(q->queue_lock, flags); - queue_flag_clear(QUEUE_FLAG_QUIESCED, q); - spin_unlock_irqrestore(q->queue_lock, flags); + blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); /* dispatch requests which are inserted during quiescing */ blk_mq_run_hw_queues(q, true); diff --git a/block/blk-settings.c b/block/blk-settings.c index 7f719da0eadd..d1de71124656 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -859,12 +859,10 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment); void blk_queue_flush_queueable(struct request_queue *q, bool queueable) { - spin_lock_irq(q->queue_lock); if (queueable) - queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q); + blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q); else - queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q); - spin_unlock_irq(q->queue_lock); + blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q); } EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index fd71a00c9462..d00d1b0ec109 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -276,12 +276,10 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ if (neg) \ val = !val; \ \ - spin_lock_irq(q->queue_lock); \ if (val) \ - queue_flag_set(QUEUE_FLAG_##flag, q); \ + blk_queue_flag_set(QUEUE_FLAG_##flag, q); \ else \ - queue_flag_clear(QUEUE_FLAG_##flag, q); \ - spin_unlock_irq(q->queue_lock); \ + 
blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \ return ret; \ } @@ -414,12 +412,10 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page, if (ret < 0) return ret; - spin_lock_irq(q->queue_lock); if (poll_on) - queue_flag_set(QUEUE_FLAG_POLL, q); + blk_queue_flag_set(QUEUE_FLAG_POLL, q); else - queue_flag_clear(QUEUE_FLAG_POLL, q); - spin_unlock_irq(q->queue_lock); + blk_queue_flag_clear(QUEUE_FLAG_POLL, q); return ret; } @@ -487,12 +483,10 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page, if (set == -1) return -EINVAL; - spin_lock_irq(q->queue_lock); if (set) - queue_flag_set(QUEUE_FLAG_WC, q); + blk_queue_flag_set(QUEUE_FLAG_WC, q); else - queue_flag_clear(QUEUE_FLAG_WC, q); - spin_unlock_irq(q->queue_lock); + blk_queue_flag_clear(QUEUE_FLAG_WC, q); return count; } @@ -946,9 +940,7 @@ void blk_unregister_queue(struct gendisk *disk) */ mutex_lock(&q->sysfs_lock); - spin_lock_irq(q->queue_lock); - queue_flag_clear(QUEUE_FLAG_REGISTERED, q); - spin_unlock_irq(q->queue_lock); + blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); /* * Remove the sysfs attributes before unregistering the queue data diff --git a/block/blk-timeout.c b/block/blk-timeout.c index a05e3676d24a..34a55250f08a 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -57,12 +57,10 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr, char *p = (char *) buf; val = simple_strtoul(p, &p, 10); - spin_lock_irq(q->queue_lock); if (val) - queue_flag_set(QUEUE_FLAG_FAIL_IO, q); + blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q); else - queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); - spin_unlock_irq(q->queue_lock); + blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); } return count; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c351aaec3ca7..f84b3c7887b1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -707,6 +707,11 @@ struct request_queue { (1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_POLL)) +void blk_queue_flag_set(unsigned int flag, struct request_queue *q); +void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); +bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); + /* * @q->queue_lock is set while a queue is being initialized. Since we know * that no other threads access the queue object before @q->queue_lock has -- cgit v1.2.3
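As a usage sketch (hypothetical caller code, not part of the patch above), the new helpers replace the open-coded lock/modify/unlock pattern that the converted hunks keep repeating:

static void hypothetical_enable_stats(struct request_queue *q)
{
	unsigned long flags;

	/* Before this patch, every caller open-coded the locking: */
	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* After it, one call takes and releases the lock internally: */
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);

	/* The test-and-set variant returns the previous value, so one-time
	 * setup can be guarded without a separate test_bit() call: */
	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q)) {
		/* flag was clear; do any one-time initialization here */
	}
}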
From 7dfdbc7367f6f789715cab2cb484b78ab45e9f3e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:05 -0800 Subject: block: Protect queue flag changes with the queue lock Since the queue flags may be changed concurrently from multiple contexts after a queue becomes visible in sysfs, make these changes safe by protecting them with the queue lock. Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Martin K. Petersen Reviewed-by: Johannes Thumshirn Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- block/blk-stat.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index a86899022683..f5c7dbcb954f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2997,7 +2997,7 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); static bool blk_poll_stats_enable(struct request_queue *q) { if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || - test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) + blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q)) return true; blk_stat_add_callback(q, q->poll_cb); return false; diff --git a/block/blk-stat.c b/block/blk-stat.c index b664aa6df725..bd365a95fcf8 100644 --- a/block/blk-stat.c +++ b/block/blk-stat.c @@ -152,7 +152,7 @@ void blk_stat_add_callback(struct request_queue *q, spin_lock(&q->stats->lock); list_add_tail_rcu(&cb->list, &q->stats->callbacks); - queue_flag_set(QUEUE_FLAG_STATS, q); + blk_queue_flag_set(QUEUE_FLAG_STATS, q); spin_unlock(&q->stats->lock); } EXPORT_SYMBOL_GPL(blk_stat_add_callback); @@ -163,7 +163,7 @@ void blk_stat_remove_callback(struct request_queue *q, spin_lock(&q->stats->lock); list_del_rcu(&cb->list); if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting) - queue_flag_clear(QUEUE_FLAG_STATS, q); + blk_queue_flag_clear(QUEUE_FLAG_STATS, q); spin_unlock(&q->stats->lock); del_timer_sync(&cb->timer); @@ -191,7 +191,7 @@ void blk_stat_enable_accounting(struct request_queue *q) { spin_lock(&q->stats->lock); q->stats->enable_accounting = true; - queue_flag_set(QUEUE_FLAG_STATS, q); + blk_queue_flag_set(QUEUE_FLAG_STATS, q); spin_unlock(&q->stats->lock); } -- cgit v1.2.3 From 4e699cb99d08563df4587a9a667bc5936dc75e51 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:06 -0800 Subject: mtip32xx: Use the blk_queue_flag_*() functions Use the blk_queue_flag_*() functions instead of open-coding these. Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- drivers/block/mtip32xx/mtip32xx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index b8af7352a18f..769c551e3d71 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -159,7 +159,7 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev) if (vendor_id == 0xFFFF) { dd->sr = true; if (dd->queue) - set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags); + blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue); else dev_warn(&dd->pdev->dev, "%s: dd->queue is NULL\n", __func__); @@ -3855,8 +3855,8 @@ skip_create_disk: goto start_service_thread; /* Set device limits.
*/ - set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); - clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags); + blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue); blk_queue_max_segments(dd->queue, MTIP_MAX_SG); blk_queue_physical_block_size(dd->queue, 4096); blk_queue_max_hw_sectors(dd->queue, 0xffff); @@ -3866,7 +3866,7 @@ skip_create_disk: /* Signal trim support */ if (dd->trim_supp == true) { - set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, dd->queue); dd->queue->limits.discard_granularity = 4096; blk_queue_max_discard_sectors(dd->queue, MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES); -- cgit v1.2.3 From 44e1ebe2a33b6cf70d6bee6beb1d5a198a841380 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:07 -0800 Subject: bcache: Use the blk_queue_flag_{set,clear}() functions Use the blk_queue_flag_{set,clear}() functions instead of open-coding these. Cc: Kent Overstreet Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Michael Lyle Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4d1d8dfb2d2a..e8dfa804bd98 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -833,9 +833,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, q->limits.io_min = block_size; q->limits.logical_block_size = block_size; q->limits.physical_block_size = block_size; - set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); - clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags); - set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); + blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); blk_queue_write_cache(q, true, true); -- cgit v1.2.3 From bc74c33eab7590abaf7c90563089a82f62667823 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:08 -0800 Subject: iscsi: Use blk_queue_flag_set() Use blk_queue_flag_set() instead of open-coding this function. Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Acked-by: Martin K. Petersen Reviewed-by: Johannes Thumshirn Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- drivers/scsi/iscsi_tcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 6198559abbd8..07b54dfebccf 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -948,7 +948,7 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev) { - set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags); + blk_queue_flag_set(QUEUE_FLAG_BIDI, sdev->request_queue); return 0; } -- cgit v1.2.3 From bf3a2b310ea35ae2f641bb734892574bd820d4a5 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:09 -0800 Subject: target/tcm_loop: Use blk_queue_flag_set() Use blk_queue_flag_set() instead of open-coding this function. Cc: Nicholas A. Bellinger Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. 
Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- drivers/target/loopback/tcm_loop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 9cd4ffe76c07..60d5b918c4ac 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -309,7 +309,7 @@ static int tcm_loop_target_reset(struct scsi_cmnd *sc) static int tcm_loop_slave_alloc(struct scsi_device *sd) { - set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags); + blk_queue_flag_set(QUEUE_FLAG_BIDI, sd->request_queue); return 0; } -- cgit v1.2.3 From 8b904b5b6b58b9a29dcf3f82d936d9e7fd69fda6 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:10 -0800 Subject: block: Use blk_queue_flag_*() in drivers instead of queue_flag_*() This patch has been generated as follows: for verb in set_unlocked clear_unlocked set clear; do replace-in-files queue_flag_${verb} blk_queue_flag_${verb%_unlocked} \ $(git grep -lw queue_flag_${verb} drivers block/bsg*) done Except for protecting all queue flag changes with the queue lock this patch does not change any functionality. Cc: Mike Snitzer Cc: Shaohua Li Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Signed-off-by: Bart Van Assche Reviewed-by: Martin K. Petersen Reviewed-by: Johannes Thumshirn Acked-by: Martin K. Petersen Signed-off-by: Jens Axboe --- block/bsg-lib.c | 4 ++-- drivers/block/drbd/drbd_nl.c | 4 ++-- drivers/block/loop.c | 10 +++++----- drivers/block/nbd.c | 8 ++++---- drivers/block/null_blk.c | 6 +++--- drivers/block/rbd.c | 4 ++-- drivers/block/rsxx/dev.c | 6 +++--- drivers/block/skd_main.c | 4 ++-- drivers/block/xen-blkfront.c | 10 +++++----- drivers/block/zram/zram_drv.c | 6 +++--- drivers/ide/ide-disk.c | 4 ++-- drivers/ide/ide-probe.c | 2 +- drivers/lightnvm/pblk-init.c | 2 +- drivers/md/dm-table.c | 16 ++++++++-------- drivers/md/md-linear.c | 4 ++-- drivers/md/md.c | 4 ++-- drivers/md/raid0.c | 4 ++-- drivers/md/raid1.c | 6 +++--- drivers/md/raid10.c | 6 +++--- drivers/md/raid5.c | 4 ++-- drivers/mmc/core/queue.c | 8 ++++---- drivers/mtd/mtd_blkdevs.c | 6 +++--- drivers/nvdimm/blk.c | 2 +- drivers/nvdimm/btt.c | 2 +- drivers/nvdimm/pmem.c | 4 ++-- drivers/nvme/host/core.c | 4 ++-- drivers/nvme/host/multipath.c | 2 +- drivers/s390/block/dasd.c | 4 ++-- drivers/s390/block/dcssblk.c | 2 +- drivers/s390/block/scm_blk.c | 4 ++-- drivers/s390/block/xpram.c | 4 ++-- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2 +- drivers/scsi/scsi_debug.c | 2 +- drivers/scsi/scsi_lib.c | 2 +- drivers/scsi/scsi_transport_sas.c | 4 ++-- drivers/scsi/sd.c | 8 ++++---- 38 files changed, 89 insertions(+), 89 deletions(-) diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 1474153f73e3..b4fe1a48f111 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -275,8 +275,8 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, q->queuedata = dev; q->bsg_job_fn = job_fn; - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_BIDI, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); blk_queue_softirq_done(q, bsg_softirq_done); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index a12f77e6891e..b4f02768ba47 100644 --- a/drivers/block/drbd/drbd_nl.c +++ 
b/drivers/block/drbd/drbd_nl.c @@ -1212,10 +1212,10 @@ static void decide_on_discard_support(struct drbd_device *device, * topology on all peers. */ blk_queue_discard_granularity(q, 512); q->limits.max_discard_sectors = drbd_max_discard_sectors(connection); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection); } else { - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); blk_queue_discard_granularity(q, 0); q->limits.max_discard_sectors = 0; q->limits.max_write_zeroes_sectors = 0; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 9d29aa6413e5..7952ed5c607b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -214,10 +214,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_mq_freeze_queue(lo->lo_queue); lo->use_dio = use_dio; if (use_dio) { - queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); + blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); lo->lo_flags |= LO_FLAGS_DIRECT_IO; } else { - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; } blk_mq_unfreeze_queue(lo->lo_queue); @@ -817,7 +817,7 @@ static void loop_config_discard(struct loop_device *lo) q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); return; } @@ -826,7 +826,7 @@ static void loop_config_discard(struct loop_device *lo) blk_queue_max_discard_sectors(q, UINT_MAX >> 9); blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } static void loop_unprepare_queue(struct loop_device *lo) @@ -1808,7 +1808,7 @@ static int loop_add(struct loop_device **l, int i) * page. For directio mode, merge does help to dispatch bigger request * to underlayer disk. We will enable merge once directio is enabled. 
*/ - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); err = -ENOMEM; disk = lo->lo_disk = alloc_disk(1 << part_shift); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 86258b00a1d4..afbc202ca6fd 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -964,7 +964,7 @@ static void nbd_parse_flags(struct nbd_device *nbd) else set_disk_ro(nbd->disk, false); if (config->flags & NBD_FLAG_SEND_TRIM) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue); if (config->flags & NBD_FLAG_SEND_FLUSH) { if (config->flags & NBD_FLAG_SEND_FUA) blk_queue_write_cache(nbd->disk->queue, true, true); @@ -1040,7 +1040,7 @@ static void nbd_config_put(struct nbd_device *nbd) nbd->config = NULL; nbd->tag_set.timeout = 0; - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue); mutex_unlock(&nbd->config_lock); nbd_put(nbd); @@ -1488,8 +1488,8 @@ static int nbd_dev_add(int index) /* * Tell the block layer that we are not a rotational device */ - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); disk->queue->limits.discard_granularity = 512; blk_queue_max_discard_sectors(disk->queue, UINT_MAX); blk_queue_max_segment_size(disk->queue, UINT_MAX); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index d6be7a6d8ca6..0517613afccb 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1525,7 +1525,7 @@ static void null_config_discard(struct nullb *nullb) nullb->q->limits.discard_granularity = nullb->dev->blocksize; nullb->q->limits.discard_alignment = nullb->dev->blocksize; blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); } static int null_open(struct block_device *bdev, fmode_t mode) @@ -1810,8 +1810,8 @@ static int null_add_dev(struct nullb_device *dev) } nullb->q->queuedata = nullb; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); mutex_lock(&lock); nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8e40da093766..0016170cde0a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4370,7 +4370,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) goto out_tag_set; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ /* set io sizes to object size */ @@ -4383,7 +4383,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) blk_queue_io_opt(q, segment_size); /* enable the discard support */ - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q->limits.discard_granularity = segment_size; blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index e397d3ee7308..dddb3f2490b6 100644 --- a/drivers/block/rsxx/dev.c +++ 
b/drivers/block/rsxx/dev.c @@ -287,10 +287,10 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue); if (rsxx_discard_supported(card)) { - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue); blk_queue_max_discard_sectors(card->queue, RSXX_HW_BLK_SIZE >> 9); card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE; diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index e41935ab41ef..bc7aea6d7b7c 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -2858,8 +2858,8 @@ static int skd_cons_disk(struct skd_device *skdev) /* set optimal I/O size to 8KB */ blk_queue_io_opt(q, 8192); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); blk_queue_rq_timeout(q, 8 * HZ); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e126e4cac2ca..3fcdc0d8eed3 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -931,15 +931,15 @@ static void blkif_set_queue_limits(struct blkfront_info *info) unsigned int segments = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; - queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); + blk_queue_flag_set(QUEUE_FLAG_VIRT, rq); if (info->feature_discard) { - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) - queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq); + blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. 
*/ @@ -1610,8 +1610,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) blkif_req(req)->error = BLK_STS_NOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; - queue_flag_clear(QUEUE_FLAG_DISCARD, rq); - queue_flag_clear(QUEUE_FLAG_SECERASE, rq); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq); + blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq); } break; case BLKIF_OP_FLUSH_DISKCACHE: diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 85110e7931e5..71b449613cfa 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1530,8 +1530,8 @@ static int zram_add(void) /* Actual capacity set using syfs (/sys/block/zram/disksize */ set_capacity(zram->disk, 0); /* zram devices sort of resembles non-rotational disks */ - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); /* * To ensure that we always get PAGE_SIZE aligned @@ -1544,7 +1544,7 @@ static int zram_add(void) blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); zram->disk->queue->limits.discard_granularity = PAGE_SIZE; blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue); /* * zram_bio_discard() will clear all logical blocks if logical block diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 188d1b03715d..9c47f975567f 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -687,8 +687,8 @@ static void ide_disk_setup(ide_drive_t *drive) queue_max_sectors(q) / 2); if (ata_id_is_ssd(id)) { - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } /* calculate drive capacity, and select LBA if possible */ diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index d6b8c7e1545d..2019e66eada7 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -773,7 +773,7 @@ static int ide_init_queue(ide_drive_t *drive) q->request_fn = do_ide_request; q->initialize_rq_fn = ide_initialize_rq; q->cmd_size = sizeof(struct ide_request); - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); if (blk_init_allocated_queue(q) < 0) { blk_cleanup_queue(q); return 1; diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 93d671ca518e..5b46924ac66c 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -1067,7 +1067,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size; tqueue->limits.discard_alignment = 0; blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue); pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n", tdisk->disk_name, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5fe7ec356c33..54c39ad4ef01 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1861,7 +1861,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, q->limits = *limits; if (!dm_table_supports_discards(t)) { - 
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); /* Must also clear discard limits... */ q->limits.max_discard_sectors = 0; q->limits.max_hw_discard_sectors = 0; @@ -1869,7 +1869,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, q->limits.discard_alignment = 0; q->limits.discard_misaligned = 0; } else - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { wc = true; @@ -1879,15 +1879,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, blk_queue_write_cache(q, wc, fua); if (dm_table_supports_dax(t)) - queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); + blk_queue_flag_set(QUEUE_FLAG_DAX, q); if (dm_table_supports_dax_write_cache(t)) dax_write_cache(t->md->dax_dev, true); /* Ensure that all underlying devices are non-rotational. */ if (dm_table_all_devices_attribute(t, device_is_nonrot)) - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); else - queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; @@ -1895,9 +1895,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, q->limits.max_write_zeroes_sectors = 0; if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) - queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q); else - queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q); dm_table_verify_integrity(t); @@ -1908,7 +1908,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, * have it set. */ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } unsigned int dm_table_get_num_targets(struct dm_table *t) diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 773fc70dced7..4964323d936b 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -138,9 +138,9 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) } if (!discard_supported) - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); /* * Here we calculate the device offsets. 
diff --git a/drivers/md/md.c b/drivers/md/md.c index eba7fa2f0abb..de2b26fba5d8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5608,9 +5608,9 @@ int md_run(struct mddev *mddev) if (mddev->degraded) nonrot = false; if (nonrot) - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); mddev->queue->backing_dev_info->congested_data = mddev; mddev->queue->backing_dev_info->congested_fn = md_congested; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 5ecba9eef441..584c10347267 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev) discard_supported = true; } if (!discard_supported) - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); } /* calculate array device size */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b2eae332e1a2..f1635eb9e95a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1760,7 +1760,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) } } if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); print_conf(conf); return err; } @@ -3099,10 +3099,10 @@ static int raid1_run(struct mddev *mddev) if (mddev->queue) { if (discard_supported) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 99c9207899a7..e9c409c5f344 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1845,7 +1845,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) break; } if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); print_conf(conf); return err; @@ -3844,10 +3844,10 @@ static int raid10_run(struct mddev *mddev) if (mddev->queue) { if (discard_supported) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); } /* need to check that every block has at least one working mirror */ diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 50d01144b805..14714b23a2fa 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7444,10 +7444,10 @@ static int raid5_run(struct mddev *mddev) if (devices_handle_discard_safely && mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && mddev->queue->limits.discard_granularity >= stripe) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 421fab7250ac..56e9a803db21 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ 
-185,14 +185,14 @@ static void mmc_queue_setup_discard(struct request_queue *q, if (!max_discard) return; - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); blk_queue_max_discard_sectors(q, max_discard); q->limits.discard_granularity = card->pref_erase << 9; /* granularity must not be greater than max. discard */ if (card->pref_erase > max_discard) q->limits.discard_granularity = 0; if (mmc_can_secure_erase_trim(card)) - queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); + blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); } /** @@ -356,8 +356,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 9ec8f033ac5f..16ae4ae8e8f9 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -419,11 +419,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) blk_queue_logical_block_size(new->rq, tr->blksize); blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq); + blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq); if (tr->discard) { - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq); blk_queue_max_discard_sectors(new->rq, UINT_MAX); } diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 345acca576b3..7bde764f939a 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -266,7 +266,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) blk_queue_make_request(q, nd_blk_make_request); blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_logical_block_size(q, nsblk_sector_size(nsblk)); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q->queuedata = nsblk; disk = alloc_disk(0); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 2ef544f10ec8..6f311f88a8e8 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1542,7 +1542,7 @@ static int btt_blk_init(struct btt *btt) blk_queue_make_request(btt->btt_queue, btt_make_request); blk_queue_logical_block_size(btt->btt_queue, btt->sector_size); blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue); btt->btt_queue->queuedata = btt; set_capacity(btt->btt_disk, 0); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index cfb15ac50925..145db2ad712f 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -388,8 +388,8 @@ static int pmem_attach_disk(struct device *dev, blk_queue_physical_block_size(q, PAGE_SIZE); blk_queue_logical_block_size(q, pmem_sector_size(ndns)); blk_queue_max_hw_sectors(q, UINT_MAX); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_DAX, q); q->queuedata = pmem; disk = alloc_disk_node(0, nid); diff --git a/drivers/nvme/host/core.c 
b/drivers/nvme/host/core.c index 817e5e2766da..72e241923e7d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1358,7 +1358,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, blk_queue_max_discard_sectors(queue, UINT_MAX); blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, queue); if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); @@ -2949,7 +2949,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) ns->queue = blk_mq_init_queue(ctrl->tagset); if (IS_ERR(ns->queue)) goto out_free_ns; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); ns->queue->queuedata = ns; ns->ctrl = ctrl; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 88440562a197..7283d7149baf 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -168,7 +168,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) q->queuedata = head; blk_queue_make_request(q, nvme_ns_head_make_request); q->poll_fn = nvme_ns_head_poll; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* set to a default value for 512 until disk is validated */ blk_queue_logical_block_size(q, 512); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a7c15f0085e2..7be803afcb43 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3210,7 +3210,7 @@ static void dasd_setup_queue(struct dasd_block *block) } else { max = block->base->discipline->max_blocks << block->s2b_shift; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q->limits.max_dev_sectors = max; blk_queue_logical_block_size(q, logical_block_size); blk_queue_max_hw_sectors(q, max); @@ -3233,7 +3233,7 @@ static void dasd_setup_queue(struct dasd_block *block) blk_queue_max_discard_sectors(q, max_discard_sectors); blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 9cae08b36b80..0a312e450207 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -633,7 +633,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->private_data = dev_info; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); - queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); + blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index b4130c7880d8..b1fcb76dd272 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -472,8 +472,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) blk_queue_logical_block_size(rq, 1 << 12); blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */ blk_queue_max_segments(rq, nr_max_blk); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq); + blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); + 
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); bdev->gendisk = alloc_disk(SCM_NR_PARTS); if (!bdev->gendisk) { diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 2a6334ca750e..3df5d68d09f0 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -348,8 +348,8 @@ static int __init xpram_setup_blkdev(void) put_disk(xpram_disks[i]); goto out; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); + blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index a71ee67df084..dc234650014c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1864,7 +1864,7 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue); + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 073ced07e662..298019cf08a2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1908,7 +1908,7 @@ megasas_is_prp_possible(struct megasas_instance *instance, * then sending IOs with holes. * * Though driver can request block layer to disable IO merging by calling- - * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but + * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but * user may tune sysfs parameter- nomerges again to 0 or 1. * * If in future IO scheduling is enabled with SCSI BLK MQ, diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 74fca184dba9..e3843828e59a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2352,7 +2352,7 @@ scsih_slave_configure(struct scsi_device *sdev) ** merged and can eliminate holes created during merging ** operation. 
**/ - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); blk_queue_virt_boundary(sdev->request_queue, ioc->page_size - 1); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index a5986dae9020..1cb353f18d08 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3897,7 +3897,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp) if (sdebug_verbose) pr_info("slave_alloc <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); + blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue); return 0; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 71d1135f94d0..538152f3528e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2140,7 +2140,7 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) { struct device *dev = shost->dma_dev; - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); /* * this limit is imposed by hardware restrictions diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 736a1f4f9676..7c0987616684 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -227,8 +227,8 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) * by default assume old behaviour and bounce for any highmem page */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_BIDI, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); return 0; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bff21e636ddd..98de3207ac5d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -714,7 +714,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) case SD_LBP_FULL: case SD_LBP_DISABLE: blk_queue_max_discard_sectors(q, 0); - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); return; case SD_LBP_UNMAP: @@ -747,7 +747,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) } blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) @@ -2952,8 +2952,8 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) rot = get_unaligned_be16(&buffer[4]); if (rot == 1) { - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { -- cgit v1.2.3 From 1db2008b79a32db2ad41338c6c74c4735cf74f6d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:11 -0800 Subject: block: Complain if queue_flag_(set|clear)_unlocked() is abused Since it is not safe to use queue_flag_(set|clear)_unlocked() without holding the queue lock after the sysfs entries for a queue have been created, complain if this happens. Cc: Mike Snitzer Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. 
Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f84b3c7887b1..888c9b25cb8f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -728,12 +728,18 @@ static inline void queue_lockdep_assert_held(struct request_queue *q) static inline void queue_flag_set_unlocked(unsigned int flag, struct request_queue *q) { + if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && + kref_read(&q->kobj.kref)) + lockdep_assert_held(q->queue_lock); __set_bit(flag, &q->queue_flags); } static inline void queue_flag_clear_unlocked(unsigned int flag, struct request_queue *q) { + if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && + kref_read(&q->kobj.kref)) + lockdep_assert_held(q->queue_lock); __clear_bit(flag, &q->queue_flags); } -- cgit v1.2.3
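As an illustration (hypothetical driver code, not part of the patch above), the checks added to the _unlocked variants turn a silent race into a lockdep complaint once the queue is visible in sysfs:

static void hypothetical_toggle_merges(struct request_queue *q)
{
	/* Once QUEUE_FLAG_INIT_DONE is set and the queue kobject is still
	 * referenced (i.e. the sysfs entries exist), this call now trips
	 * lockdep_assert_held(q->queue_lock): */
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);

	/* Holding the queue lock keeps the assertion quiet: */
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);
}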
From 8a0ac14b8da9b86cfbe7aace40c8d485ed5c5b97 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:12 -0800 Subject: block: Move the queue_flag_*() functions from a public into a private header file This patch helps avoid introducing new code in block drivers that manipulates queue flags without holding the queue lock when that lock should be held. Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- block/blk.h | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/blkdev.h | 69 -------------------------------------------------- 2 files changed, 69 insertions(+), 69 deletions(-) diff --git a/block/blk.h b/block/blk.h index 46db5dc83dcb..b034fd2460c4 100644 --- a/block/blk.h +++ b/block/blk.h @@ -41,6 +41,75 @@ extern struct kmem_cache *request_cachep; extern struct kobj_type blk_queue_ktype; extern struct ida blk_queue_ida; +/* + * @q->queue_lock is set while a queue is being initialized. Since we know + * that no other threads access the queue object before @q->queue_lock has + * been set, it is safe to manipulate queue flags without holding the + * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and + * blk_init_allocated_queue(). + */ +static inline void queue_lockdep_assert_held(struct request_queue *q) +{ + if (q->queue_lock) + lockdep_assert_held(q->queue_lock); +} + +static inline void queue_flag_set_unlocked(unsigned int flag, + struct request_queue *q) +{ + if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && + kref_read(&q->kobj.kref)) + lockdep_assert_held(q->queue_lock); + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear_unlocked(unsigned int flag, + struct request_queue *q) +{ + if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && + kref_read(&q->kobj.kref)) + lockdep_assert_held(q->queue_lock); + __clear_bit(flag, &q->queue_flags); +} + +static inline int queue_flag_test_and_clear(unsigned int flag, + struct request_queue *q) +{ + queue_lockdep_assert_held(q); + + if (test_bit(flag, &q->queue_flags)) { + __clear_bit(flag, &q->queue_flags); + return 1; + } + + return 0; +} + +static inline int queue_flag_test_and_set(unsigned int flag, + struct request_queue *q) +{ + queue_lockdep_assert_held(q); + + if (!test_bit(flag, &q->queue_flags)) { + __set_bit(flag, &q->queue_flags); + return 0; + } + + return 1; +} + +static inline void queue_flag_set(unsigned int flag, struct request_queue *q) +{ + queue_lockdep_assert_held(q); + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) +{ + queue_lockdep_assert_held(q); + __clear_bit(flag, &q->queue_flags); +} + static inline struct blk_flush_queue *blk_get_flush_queue( struct request_queue *q, struct blk_mq_ctx *ctx) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 888c9b25cb8f..19eaf8d89368 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -712,75 +712,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); -/* - * @q->queue_lock is set while a queue is being initialized. Since we know - * that no other threads access the queue object before @q->queue_lock has - * been set, it is safe to manipulate queue flags without holding the - * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and - * blk_init_allocated_queue().
- */ -static inline void queue_lockdep_assert_held(struct request_queue *q) -{ - if (q->queue_lock) - lockdep_assert_held(q->queue_lock); -} - -static inline void queue_flag_set_unlocked(unsigned int flag, - struct request_queue *q) -{ - if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && - kref_read(&q->kobj.kref)) - lockdep_assert_held(q->queue_lock); - __set_bit(flag, &q->queue_flags); -} - -static inline void queue_flag_clear_unlocked(unsigned int flag, - struct request_queue *q) -{ - if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) && - kref_read(&q->kobj.kref)) - lockdep_assert_held(q->queue_lock); - __clear_bit(flag, &q->queue_flags); -} - -static inline int queue_flag_test_and_clear(unsigned int flag, - struct request_queue *q) -{ - queue_lockdep_assert_held(q); - - if (test_bit(flag, &q->queue_flags)) { - __clear_bit(flag, &q->queue_flags); - return 1; - } - - return 0; -} - -static inline int queue_flag_test_and_set(unsigned int flag, - struct request_queue *q) -{ - queue_lockdep_assert_held(q); - - if (!test_bit(flag, &q->queue_flags)) { - __set_bit(flag, &q->queue_flags); - return 0; - } - - return 1; -} - -static inline void queue_flag_set(unsigned int flag, struct request_queue *q) -{ - queue_lockdep_assert_held(q); - __set_bit(flag, &q->queue_flags); -} - -static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) -{ - queue_lockdep_assert_held(q); - __clear_bit(flag, &q->queue_flags); -} - #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) -- cgit v1.2.3 From 0a703c1f89af614d38808a4802744db6c8638762 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 26 Jan 2018 19:58:16 -0800 Subject: Documentation/cdrom: update cdrom-standard.tex for kernel changes Documentation updates for Documentation/cdrom/cdrom-standard.tex: cdrom_device_ops: - add check_events() and generic_packet() cdrom_device_info: - add one 'const' modifier - correct some field descriptions - add some missing fields - drop 'kdev_t dev;' field Also drop sentence from documentation because it is not referenced anywhere in the kernel header or C files. 
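In C terms, the operations table as updated by this patch reads roughly as follows (an abridged sketch transcribed from the \halign fields in the diff below; the authoritative definition is struct cdrom_device_ops in include/linux/cdrom.h):

struct cdrom_device_ops {
	int (*open)(struct cdrom_device_info *, int);
	void (*release)(struct cdrom_device_info *);
	int (*drive_status)(struct cdrom_device_info *, int);
	unsigned int (*check_events)(struct cdrom_device_info *,
				     unsigned int, int);	/* newly documented */
	int (*media_changed)(struct cdrom_device_info *, int);
	int (*tray_move)(struct cdrom_device_info *, int);
	int (*lock_door)(struct cdrom_device_info *, int);
	/* ... */
	int (*reset)(struct cdrom_device_info *);
	int (*audio_ioctl)(struct cdrom_device_info *, unsigned int, void *);
	/* dev_ioctl() is dropped from the documentation */
	const int capability;				/* capability flags */
	int (*generic_packet)(struct cdrom_device_info *,
			      struct packet_command *);	/* newly documented */
};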
Signed-off-by: Randy Dunlap Signed-off-by: Jens Axboe --- Documentation/cdrom/cdrom-standard.tex | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/Documentation/cdrom/cdrom-standard.tex b/Documentation/cdrom/cdrom-standard.tex index 8f85b0e41046..9083c21472d6 100644 --- a/Documentation/cdrom/cdrom-standard.tex +++ b/Documentation/cdrom/cdrom-standard.tex @@ -234,6 +234,7 @@ struct& cdrom_device_ops\ \{ \hidewidth\cr &int& (* open)(struct\ cdrom_device_info *, int)\cr &void& (* release)(struct\ cdrom_device_info *);\cr &int& (* drive_status)(struct\ cdrom_device_info *, int);\cr + &unsigned\ int& (* check_events)(struct\ cdrom_device_info *, unsigned\ int, int);\cr &int& (* media_changed)(struct\ cdrom_device_info *, int);\cr &int& (* tray_move)(struct\ cdrom_device_info *, int);\cr &int& (* lock_door)(struct\ cdrom_device_info *, int);\cr @@ -245,10 +246,9 @@ struct& cdrom_device_ops\ \{ \hidewidth\cr &int& (* reset)(struct\ cdrom_device_info *);\cr &int& (* audio_ioctl)(struct\ cdrom_device_info *, unsigned\ int, void *{});\cr - &int& (* dev_ioctl)(struct\ cdrom_device_info *, unsigned\ int, - unsigned\ long);\cr \noalign{\medskip} &const\ int& capability;& capability flags \cr + &int& (* generic_packet)(struct\ cdrom_device_info *, struct\ packet_command *{});\cr \};\cr } $$ @@ -274,19 +274,32 @@ $$ \halign{$#$\ \hfil&$#$\ \hfil&\hbox to 10em{$#$\hss}& $/*$ \rm# $*/$\hfil\cr struct& cdrom_device_info\ \{ \hidewidth\cr - & struct\ cdrom_device_ops *& ops;& device operations for this major\cr - & struct\ cdrom_device_info *& next;& next device_info for this major\cr + & const\ struct\ cdrom_device_ops *& ops;& device operations for this major\cr + & struct\ list_head& list;& linked list of all device_info\cr + & struct\ gendisk *& disk;& matching block layer disk\cr & void *& handle;& driver-dependent data\cr \noalign{\medskip} - & kdev_t& dev;& device number (incorporates minor)\cr & int& mask;& mask of capability: disables them \cr & int& speed;& maximum speed for reading data \cr & int& capacity;& number of discs in a jukebox \cr \noalign{\medskip} - &int& options : 30;& options flags \cr + &unsigned\ int& options : 30;& options flags \cr &unsigned& mc_flags : 2;& media-change buffer flags \cr + &unsigned\ int& vfs_events;& cached events for vfs path\cr + &unsigned\ int& ioctl_events;& cached events for ioctl path\cr & int& use_count;& number of times device is opened\cr & char& name[20];& name of the device type\cr +\noalign{\medskip} + &__u8& sanyo_slot : 2;& Sanyo 3-CD changer support\cr + &__u8& keeplocked : 1;& CDROM_LOCKDOOR status\cr + &__u8& reserved : 5;& not used yet\cr + & int& cdda_method;& see CDDA_* flags\cr + &__u8& last_sense;& saves last sense key\cr + &__u8& media_written;& dirty flag, DVD+RW bookkeeping\cr + &unsigned\ short& mmc3_profile;& current MMC3 profile\cr + & int& for_data;& unknown:TBD\cr + & int\ (* exit)\ (struct\ cdrom_device_info *);&& unknown:TBD\cr + & int& mrw_mode_page;& which MRW mode page is in use\cr \}\cr }$$ Using this $struct$, a linked list of the registered minor devices is @@ -298,9 +311,7 @@ The $mask$ flags can be used to mask out some of the capabilities listed in $ops\to capability$, if a specific drive doesn't support a feature of the driver. The value $speed$ specifies the maximum head-rate of the drive, measured in units of normal audio speed (176\,kB/sec raw data or -150\,kB/sec file system data). 
The value $n_discs$ should reflect the -number of discs the drive can hold simultaneously, if it is designed -as a juke-box, or otherwise~1. The parameters are declared $const$ +150\,kB/sec file system data). The parameters are declared $const$ because they describe properties of the drive, which don't change after registration. -- cgit v1.2.3 From da5ff37c7ede7aa4e6883f4dd5a83b6cf8b9837f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 27 Jan 2018 16:31:45 -0800 Subject: Documentation/cdrom: fix German sharp s in LaTeX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Apparently the LaTeX abbreviation for the German "sharp s" (ß) (Unicode U+00DF) has changed from {\sz} to {\ss}. With {\sz}, I get this error at line 1016 (line number after another patch): ! Undefined control sequence. l.1016 ...nel~2.0. Further thanks to Heiko Ei{\sz }feldt, This is fixed by changing the {\sz} to {\ss}. Signed-off-by: Randy Dunlap Signed-off-by: Jens Axboe --- Documentation/cdrom/cdrom-standard.tex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/cdrom/cdrom-standard.tex b/Documentation/cdrom/cdrom-standard.tex index 9083c21472d6..f7cd455973f7 100644 --- a/Documentation/cdrom/cdrom-standard.tex +++ b/Documentation/cdrom/cdrom-standard.tex @@ -1013,7 +1013,7 @@ taken over the torch in maintaining \cdromc\ and integrating much \cdrom-related code in the 2.1-kernel. Thanks to Scott Snyder and Gerd Knorr, who were the first to implement this interface for SCSI and IDE-CD drivers and added many ideas for extension of the data -structures relative to kernel~2.0. Further thanks to Heiko Ei{\sz}feldt, +structures relative to kernel~2.0. Further thanks to Heiko Ei{\ss}feldt, Thomas Quinot, Jon Tombs, Ken Pizzini, Eberhard M\"onkeberg and Andrew Kroll, the \linux\ \cdrom\ device driver developers who were kind enough to give suggestions and criticisms during the writing. Finally -- cgit v1.2.3 From 2bbea6e117357d17842114c65e9a9cf2d13ae8a3 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Fri, 9 Mar 2018 13:59:06 +0100 Subject: cdrom: do not call check_disk_change() inside cdrom_open() When mounting an ISO filesystem, the system sometimes (very rarely) hangs because of a race condition between two tasks.
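Reduced to its essence, this is an ABBA deadlock between the superblock's s_umount rwsem and the global sr_mutex. The following userspace analogue (illustrative only: plain pthread mutexes, with variable names merely mirroring the kernel locks) hangs the same way:

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t s_umount = PTHREAD_MUTEX_INITIALIZER;	/* stands in for sb->s_umount */
static pthread_mutex_t sr_mutex = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the global sr_mutex */

static void *mount_task(void *arg)	/* plays the role of "mount" */
{
	(void)arg;
	pthread_mutex_lock(&s_umount);	/* takes s_umount first */
	sleep(1);			/* widen the race window */
	pthread_mutex_lock(&sr_mutex);	/* then blocks here ... */
	pthread_mutex_unlock(&sr_mutex);
	pthread_mutex_unlock(&s_umount);
	return NULL;
}

static void *open_task(void *arg)	/* plays the role of "systemd-udevd" */
{
	(void)arg;
	pthread_mutex_lock(&sr_mutex);	/* takes sr_mutex first */
	sleep(1);
	pthread_mutex_lock(&s_umount);	/* ... while this blocks here: ABBA deadlock */
	pthread_mutex_unlock(&s_umount);
	pthread_mutex_unlock(&sr_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, mount_task, NULL);
	pthread_create(&b, NULL, open_task, NULL);
	pthread_join(a, NULL);		/* never returns: both threads are stuck */
	pthread_join(b, NULL);
	return 0;
}

The two kernel back traces below show the corresponding deadlocked tasks.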
PID: 6766 TASK: ffff88007b2a6dd0 CPU: 0 COMMAND: "mount" #0 [ffff880078447ae0] __schedule at ffffffff8168d605 #1 [ffff880078447b48] schedule_preempt_disabled at ffffffff8168ed49 #2 [ffff880078447b58] __mutex_lock_slowpath at ffffffff8168c995 #3 [ffff880078447bb8] mutex_lock at ffffffff8168bdef #4 [ffff880078447bd0] sr_block_ioctl at ffffffffa00b6818 [sr_mod] #5 [ffff880078447c10] blkdev_ioctl at ffffffff812fea50 #6 [ffff880078447c70] ioctl_by_bdev at ffffffff8123a8b3 #7 [ffff880078447c90] isofs_fill_super at ffffffffa04fb1e1 [isofs] #8 [ffff880078447da8] mount_bdev at ffffffff81202570 #9 [ffff880078447e18] isofs_mount at ffffffffa04f9828 [isofs] #10 [ffff880078447e28] mount_fs at ffffffff81202d09 #11 [ffff880078447e70] vfs_kern_mount at ffffffff8121ea8f #12 [ffff880078447ea8] do_mount at ffffffff81220fee #13 [ffff880078447f28] sys_mount at ffffffff812218d6 #14 [ffff880078447f80] system_call_fastpath at ffffffff81698c49 RIP: 00007fd9ea914e9a RSP: 00007ffd5d9bf648 RFLAGS: 00010246 RAX: 00000000000000a5 RBX: ffffffff81698c49 RCX: 0000000000000010 RDX: 00007fd9ec2bc210 RSI: 00007fd9ec2bc290 RDI: 00007fd9ec2bcf30 RBP: 0000000000000000 R8: 0000000000000000 R9: 0000000000000010 R10: 00000000c0ed0001 R11: 0000000000000206 R12: 00007fd9ec2bc040 R13: 00007fd9eb6b2380 R14: 00007fd9ec2bc210 R15: 00007fd9ec2bcf30 ORIG_RAX: 00000000000000a5 CS: 0033 SS: 002b This task was trying to mount the cdrom. It allocated and configured a super_block struct and owned the write-lock for the super_block->s_umount rwsem. While exclusively owning the s_umount lock, it called sr_block_ioctl and waited to acquire the global sr_mutex lock. PID: 6785 TASK: ffff880078720fb0 CPU: 0 COMMAND: "systemd-udevd" #0 [ffff880078417898] __schedule at ffffffff8168d605 #1 [ffff880078417900] schedule at ffffffff8168dc59 #2 [ffff880078417910] rwsem_down_read_failed at ffffffff8168f605 #3 [ffff880078417980] call_rwsem_down_read_failed at ffffffff81328838 #4 [ffff8800784179d0] down_read at ffffffff8168cde0 #5 [ffff8800784179e8] get_super at ffffffff81201cc7 #6 [ffff880078417a10] __invalidate_device at ffffffff8123a8de #7 [ffff880078417a40] flush_disk at ffffffff8123a94b #8 [ffff880078417a88] check_disk_change at ffffffff8123ab50 #9 [ffff880078417ab0] cdrom_open at ffffffffa00a29e1 [cdrom] #10 [ffff880078417b68] sr_block_open at ffffffffa00b6f9b [sr_mod] #11 [ffff880078417b98] __blkdev_get at ffffffff8123ba86 #12 [ffff880078417bf0] blkdev_get at ffffffff8123bd65 #13 [ffff880078417c78] blkdev_open at ffffffff8123bf9b #14 [ffff880078417c90] do_dentry_open at ffffffff811fc7f7 #15 [ffff880078417cd8] vfs_open at ffffffff811fc9cf #16 [ffff880078417d00] do_last at ffffffff8120d53d #17 [ffff880078417db0] path_openat at ffffffff8120e6b2 #18 [ffff880078417e48] do_filp_open at ffffffff8121082b #19 [ffff880078417f18] do_sys_open at ffffffff811fdd33 #20 [ffff880078417f70] sys_open at ffffffff811fde4e #21 [ffff880078417f80] system_call_fastpath at ffffffff81698c49 RIP: 00007f29438b0c20 RSP: 00007ffc76624b78 RFLAGS: 00010246 RAX: 0000000000000002 RBX: ffffffff81698c49 RCX: 0000000000000000 RDX: 00007f2944a5fa70 RSI: 00000000000a0800 RDI: 00007f2944a5fa70 RBP: 00007f2944a5f540 R8: 0000000000000000 R9: 0000000000000020 R10: 00007f2943614c40 R11: 0000000000000246 R12: ffffffff811fde4e R13: ffff880078417f78 R14: 000000000000000c R15: 00007f2944a4b010 ORIG_RAX: 0000000000000002 CS: 0033 SS: 002b This task tried to open the cdrom device, the sr_block_open function acquired the global sr_mutex lock. 
The call to check_disk_change() then saw an event flag indicating a possible media change and tried to flush any cached data for the device. As part of the flush, it tried to acquire the super_block->s_umount lock associated with the cdrom device. This was the same super_block that the previous task had created and locked. The first task holds the s_umount lock and wants the sr_mutex lock; the second task holds the sr_mutex lock and wants the s_umount lock, so each blocks the other forever. This patch fixes the issue by moving check_disk_change() out of cdrom_open() and letting the caller take care of it. Signed-off-by: Maurizio Lombardi Signed-off-by: Jens Axboe --- drivers/block/paride/pcd.c | 2 ++ drivers/cdrom/cdrom.c | 3 --- drivers/cdrom/gdrom.c | 3 +++ drivers/ide/ide-cd.c | 2 ++ drivers/scsi/sr.c | 2 ++ 5 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 7b8c6368beb7..a026211afb51 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode) struct pcd_unit *cd = bdev->bd_disk->private_data; int ret; + check_disk_change(bdev); + mutex_lock(&pcd_mutex); ret = cdrom_open(&cd->info, bdev, mode); mutex_unlock(&pcd_mutex); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index e36d160c458f..8327478effd0 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, cd_dbg(CD_OPEN, "entering cdrom_open\n"); - /* open is event synchronization point, check events first */ - check_disk_change(bdev); - /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. */ cdi->use_count++; diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 6495b03f576c..ae3a7537cf0f 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = { static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) { int ret; + + check_disk_change(bdev); + mutex_lock(&gdrom_mutex); ret = cdrom_open(gd.cd_info, bdev, mode); mutex_unlock(&gdrom_mutex); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 7c3ed7c9af77..5613cc2d51fc 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -1613,6 +1613,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode) struct cdrom_info *info; int rc = -ENXIO; + check_disk_change(bdev); + mutex_lock(&ide_cd_mutex); info = ide_cd_get(bdev->bd_disk); if (!info) diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 9be34d37c356..0cf25d789d05 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -525,6 +525,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) struct scsi_cd *cd; int ret = -ENXIO; + check_disk_change(bdev); + mutex_lock(&sr_mutex); cd = scsi_cd_get(bdev->bd_disk); if (cd) { -- cgit v1.2.3 From 56c4bddb970658b26da0d847a2bfb3225e2f2b13 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 8 Mar 2018 15:28:50 -0800 Subject: block: Suppress kernel-doc warnings triggered by blk-zoned.c Avoid having the kernel-doc tool complain about undocumented function arguments in the blk-zoned.c source file when building with W=1.
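The fix works because scripts/kernel-doc only parses comments that open with the double-star marker, and for those it warns about every undocumented parameter under W=1. A short sketch of the convention (my_zone_helper() is an invented name):

struct block_device;

/**
 * my_zone_helper() - opened with a double star, so kernel-doc parses it
 * @bdev: target block device
 *
 * Every parameter needs an @name line like the one above, otherwise a
 * W=1 build emits a "not described" warning for it.
 */
int my_zone_helper(struct block_device *bdev);

/*
 * Opened with a single star: kernel-doc ignores this comment entirely,
 * which is the right form for internal helpers whose arguments are not
 * documented. That is all this patch changes in blk-zoned.c.
 */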
Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Damien Le Moal Signed-off-by: Jens Axboe --- block/blk-zoned.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index acb7252c7e81..08e84ef2bc05 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -296,7 +296,7 @@ int blkdev_reset_zones(struct block_device *bdev, } EXPORT_SYMBOL_GPL(blkdev_reset_zones); -/** +/* * BLKREPORTZONE ioctl processing. * Called from blkdev_ioctl. */ @@ -355,7 +355,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, return ret; } -/** +/* * BLKRESETZONE ioctl processing. * Called from blkdev_ioctl. */ -- cgit v1.2.3 From fc9de9a52e993829d928825d05fc9d3f41ce33fa Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Fri, 9 Mar 2018 09:38:26 -0700 Subject: MAINTAINERS: add coverage for drivers/block To help folks like me who use scripts/get_maintainer.pl. Signed-off-by: Ross Zwisler Signed-off-by: Jens Axboe --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 93a12af4f180..24f36c499d93 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2683,6 +2683,7 @@ L: linux-block@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git S: Maintained F: block/ +F: drivers/block/ F: kernel/trace/blktrace.c F: lib/sbitmap.c -- cgit v1.2.3 From c8f4c36f81623002165dce874fa60bb0c154b10e Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 23 Feb 2018 13:45:28 +0200 Subject: direct-io: Remove unused DIO_ASYNC_EXTEND flag This flag was added by 6039257378e4 ("direct-io: add flag to allow aio writes beyond i_size") to support XFS. However, with the rework of the XFS DIO path to use iomap in acdda3aae146 ("xfs: use iomap_dio_rw") it became redundant. So let's remove it. Reviewed-by: Christoph Hellwig Signed-off-by: Nikolay Borisov Signed-off-by: Jens Axboe --- fs/direct-io.c | 3 +-- include/linux/fs.h | 3 --- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/direct-io.c b/fs/direct-io.c index 1357ef563893..88f0c7fba1ce 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1252,8 +1252,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, */ if (is_sync_kiocb(iocb)) dio->is_async = false; - else if (!(dio->flags & DIO_ASYNC_EXTEND) && - iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) + else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) dio->is_async = false; else dio->is_async = true; diff --git a/include/linux/fs.h b/include/linux/fs.h index 2a815560fda0..260c233e7375 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2977,9 +2977,6 @@ enum { /* filesystem does not support filling holes */ DIO_SKIP_HOLES = 0x02, - /* filesystem can handle aio writes beyond i_size */ - DIO_ASYNC_EXTEND = 0x04, - /* inode/fs/bdev does not need truncate protection */ DIO_SKIP_DIO_COUNT = 0x08, }; -- cgit v1.2.3 From ce3077ee80d6ac1087c06441f4c63ce5f13ef12c Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Fri, 23 Feb 2018 13:45:29 +0200 Subject: direct-io: Remove unused DIO_SKIP_DIO_COUNT logic This flag was added by fe0f07d08ee3 ("direct-io: only inc/deci inode->i_dio_count for file systems") as a means to optimise away the atomic modification of the variable for block devices. However, with the advent of 542ff7bf18c6 ("block: new direct I/O implementation") it became unused. So let's remove it.
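For context, i_dio_count is what lets truncate wait until all in-flight direct I/O has drained. A userspace analogue of the now-unconditional pairing (illustrative only: the *_u() helpers are invented stand-ins for inode_dio_begin(), inode_dio_end() and inode_dio_wait()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int i_dio_count;	/* stands in for inode->i_dio_count */

static void inode_dio_begin_u(void)
{
	atomic_fetch_add(&i_dio_count, 1);	/* before submitting direct I/O */
}

static void inode_dio_end_u(void)
{
	atomic_fetch_sub(&i_dio_count, 1);	/* at I/O completion */
}

static void inode_dio_wait_u(void)
{
	/* truncate-style waiter: spin until all direct I/O has drained */
	while (atomic_load(&i_dio_count) > 0)
		;
}

int main(void)
{
	inode_dio_begin_u();
	printf("in flight: %d\n", atomic_load(&i_dio_count));
	inode_dio_end_u();
	inode_dio_wait_u();	/* truncate could proceed safely here */
	puts("drained");
	return 0;
}

With the flag gone, every do_blockdev_direct_IO() caller pays for the two atomic operations, but there is exactly one rule to reason about.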
Reviewed-by: Christoph Hellwig Signed-off-by: Nikolay Borisov Signed-off-by: Jens Axboe --- fs/direct-io.c | 6 ++---- include/linux/fs.h | 3 --- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/fs/direct-io.c b/fs/direct-io.c index 88f0c7fba1ce..ba12ee659673 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -315,8 +315,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) dio_warn_stale_pagecache(dio->iocb->ki_filp); } - if (!(dio->flags & DIO_SKIP_DIO_COUNT)) - inode_dio_end(dio->inode); + inode_dio_end(dio->inode); if (flags & DIO_COMPLETE_ASYNC) { /* @@ -1296,8 +1295,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, /* * Will be decremented at I/O completion time. */ - if (!(dio->flags & DIO_SKIP_DIO_COUNT)) - inode_dio_begin(inode); + inode_dio_begin(inode); retval = 0; sdio.blkbits = blkbits; diff --git a/include/linux/fs.h b/include/linux/fs.h index 260c233e7375..9bee267209e5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2976,9 +2976,6 @@ enum { /* filesystem does not support filling holes */ DIO_SKIP_HOLES = 0x02, - - /* inode/fs/bdev does not need truncate protection */ - DIO_SKIP_DIO_COUNT = 0x08, }; void dio_end_io(struct bio *bio); -- cgit v1.2.3 From 31156ec378c2ed10330c8c06bbf36fb7d7a55506 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 2018 17:28:39 +0100 Subject: bsg-lib: introduce a timeout field in struct bsg_job The zfcp driver wants to know the timeout for a bsg job, so add a field to struct bsg_job for it in preparation of not exposing the request to the bsg-lib users. Signed-off-by: Christoph Hellwig Reviewed-by: Benjamin Block Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/bsg-lib.c | 1 + drivers/s390/scsi/zfcp_fc.c | 4 ++-- include/linux/bsg-lib.h | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/block/bsg-lib.c b/block/bsg-lib.c index b4fe1a48f111..fb509779a090 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -132,6 +132,7 @@ static int bsg_prepare_job(struct device *dev, struct request *req) struct bsg_job *job = blk_mq_rq_to_pdu(req); int ret; + job->timeout = req->timeout; job->request = rq->cmd; job->request_len = rq->cmd_len; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index ca218c82321f..6162cf57a20a 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -961,7 +961,7 @@ static int zfcp_fc_exec_els_job(struct bsg_job *job, d_id = ntoh24(bsg_request->rqst_data.h_els.port_id); els->handler = zfcp_fc_ct_els_job_handler; - return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ); + return zfcp_fsf_send_els(adapter, d_id, els, job->timeout / HZ); } static int zfcp_fc_exec_ct_job(struct bsg_job *job, @@ -980,7 +980,7 @@ static int zfcp_fc_exec_ct_job(struct bsg_job *job, return ret; ct->handler = zfcp_fc_ct_job_handler; - ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ); + ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->timeout / HZ); if (ret) zfcp_fc_wka_port_put(wka_port); diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index b1be0233ce35..402223c95ce1 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -44,6 +44,8 @@ struct bsg_job { struct kref kref; + unsigned int timeout; + /* Transport/driver specific request/reply structs */ void *request; void *reply; -- cgit v1.2.3 From ef6fa64f9b8e1611854077ea9213f2eef2428cd2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 
2018 17:28:40 +0100 Subject: bsg-lib: remove bsg_job.req Users of the bsg-lib interface should only use the bsg_job data structure and should not need to know about its implementation details. Signed-off-by: Christoph Hellwig Reviewed-by: Benjamin Block Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/bsg-lib.c | 14 ++++++-------- include/linux/bsg-lib.h | 1 - 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/block/bsg-lib.c b/block/bsg-lib.c index fb509779a090..f2c2d54a61b4 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -35,7 +35,7 @@ static void bsg_teardown_job(struct kref *kref) { struct bsg_job *job = container_of(kref, struct bsg_job, kref); - struct request *rq = job->req; + struct request *rq = blk_mq_rq_from_pdu(job); put_device(job->dev); /* release reference for the request */ @@ -68,19 +68,18 @@ EXPORT_SYMBOL_GPL(bsg_job_get); void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) { - struct request *req = job->req; + struct request *req = blk_mq_rq_from_pdu(job); struct request *rsp = req->next_rq; - struct scsi_request *rq = scsi_req(req); int err; - err = scsi_req(job->req)->result = result; + err = job->sreq.result = result; if (err < 0) /* we're only returning the result field in the reply */ - rq->sense_len = sizeof(u32); + job->sreq.sense_len = sizeof(u32); else - rq->sense_len = job->reply_len; + job->sreq.sense_len = job->reply_len; /* we assume all request payload was transferred, residual == 0 */ - rq->resid_len = 0; + job->sreq.resid_len = 0; if (rsp) { WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len); @@ -232,7 +231,6 @@ static void bsg_initialize_rq(struct request *req) sreq->sense = sense; sreq->sense_len = SCSI_SENSE_BUFFERSIZE; - job->req = req; job->reply = sense; job->reply_len = sreq->sense_len; job->dd_data = job + 1; diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 402223c95ce1..08762d297cbd 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -40,7 +40,6 @@ struct bsg_buffer { }; struct bsg_job { struct scsi_request sreq; struct device *dev; - struct request *req; struct kref kref; -- cgit v1.2.3 From 17cb960f29c29ee07bf6848ada3265f4be55972e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Mar 2018 17:28:41 +0100 Subject: bsg: split handling of SCSI CDBs vs transport requeues The current BSG design tries to shoe-horn the transport-specific passthrough commands into the overall framework for SCSI passthrough requests. This has a couple of problems:

- each passthrough queue has to set the QUEUE_FLAG_SCSI_PASSTHROUGH flag despite not dealing with SCSI commands at all. Because of that these queues could also incorrectly accept SCSI commands from in-kernel users or through the legacy SCSI_IOCTL_SEND_COMMAND ioctl.

- the real SCSI bsg queues also incorrectly accept bsg requests of the BSG_SUB_PROTOCOL_SCSI_TRANSPORT type.

- the bsg transport code is almost unreadable because it tries to reuse different SCSI concepts for its own purpose.

This patch instead adds a new bsg_ops structure to handle the two cases differently, and thus solves all of the above problems. Another side effect is that the bsg-lib queues also don't need to embed a struct scsi_request anymore.
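The heart of the change is an ops table: common code dispatches through function pointers instead of branching on the queue type. A compilable userspace analogue of the pattern (all names hypothetical and heavily simplified):

#include <stdio.h>

struct sg_hdr {
	int subprotocol;	/* 0 = SCSI CDB, 1 = transport */
};

struct ops {
	int (*check_proto)(const struct sg_hdr *h);
	void (*fill_hdr)(const struct sg_hdr *h);
};

static int scsi_check(const struct sg_hdr *h)
{
	return h->subprotocol == 0 ? 0 : -1;
}

static void scsi_fill(const struct sg_hdr *h)
{
	(void)h;
	puts("SCSI CDB path");
}

static int transport_check(const struct sg_hdr *h)
{
	return h->subprotocol == 1 ? 0 : -1;
}

static void transport_fill(const struct sg_hdr *h)
{
	(void)h;
	puts("transport path");
}

static const struct ops scsi_ops = { scsi_check, scsi_fill };
static const struct ops transport_ops = { transport_check, transport_fill };

/* Common code knows only the table, never the backend behind it. */
static void submit(const struct ops *ops, const struct sg_hdr *h)
{
	if (ops->check_proto(h) == 0)
		ops->fill_hdr(h);
	else
		puts("-EINVAL");
}

int main(void)
{
	struct sg_hdr scsi = { 0 }, xport = { 1 };

	submit(&scsi_ops, &scsi);
	submit(&transport_ops, &xport);
	return 0;
}

It is this same indirection that lets bsg-lib stop embedding a struct scsi_request in the patch below.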
Signed-off-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Jens Axboe --- block/bsg-lib.c | 158 +++++++++++++++-------- block/bsg.c | 262 +++++++++++++++++--------------------- drivers/scsi/scsi_lib.c | 4 +- drivers/scsi/scsi_sysfs.c | 3 +- drivers/scsi/scsi_transport_sas.c | 1 - include/linux/bsg-lib.h | 4 +- include/linux/bsg.h | 35 +++-- 7 files changed, 250 insertions(+), 217 deletions(-) diff --git a/block/bsg-lib.c b/block/bsg-lib.c index f2c2d54a61b4..fc2e5ff2c4b9 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -27,6 +27,94 @@ #include #include #include +#include + +#define uptr64(val) ((void __user *)(uintptr_t)(val)) + +static int bsg_transport_check_proto(struct sg_io_v4 *hdr) +{ + if (hdr->protocol != BSG_PROTOCOL_SCSI || + hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT) + return -EINVAL; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + return 0; +} + +static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, + fmode_t mode) +{ + struct bsg_job *job = blk_mq_rq_to_pdu(rq); + + job->request_len = hdr->request_len; + job->request = memdup_user(uptr64(hdr->request), hdr->request_len); + if (IS_ERR(job->request)) + return PTR_ERR(job->request); + return 0; +} + +static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr) +{ + struct bsg_job *job = blk_mq_rq_to_pdu(rq); + int ret = 0; + + /* + * The assignments below don't make much sense, but are kept for + * bug by bug backwards compatibility: + */ + hdr->device_status = job->result & 0xff; + hdr->transport_status = host_byte(job->result); + hdr->driver_status = driver_byte(job->result); + hdr->info = 0; + if (hdr->device_status || hdr->transport_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->response_len = 0; + + if (job->result < 0) { + /* we're only returning the result field in the reply */ + job->reply_len = sizeof(u32); + ret = job->result; + } + + if (job->reply_len && hdr->response) { + int len = min(hdr->max_response_len, job->reply_len); + + if (copy_to_user(uptr64(hdr->response), job->reply, len)) + ret = -EFAULT; + else + hdr->response_len = len; + } + + /* we assume all request payload was transferred, residual == 0 */ + hdr->dout_resid = 0; + + if (rq->next_rq) { + unsigned int rsp_len = job->reply_payload.payload_len; + + if (WARN_ON(job->reply_payload_rcv_len > rsp_len)) + hdr->din_resid = 0; + else + hdr->din_resid = rsp_len - job->reply_payload_rcv_len; + } else { + hdr->din_resid = 0; + } + + return ret; +} + +static void bsg_transport_free_rq(struct request *rq) +{ + struct bsg_job *job = blk_mq_rq_to_pdu(rq); + + kfree(job->request); +} + +static const struct bsg_ops bsg_transport_ops = { + .check_proto = bsg_transport_check_proto, + .fill_hdr = bsg_transport_fill_hdr, + .complete_rq = bsg_transport_complete_rq, + .free_rq = bsg_transport_free_rq, +}; /** * bsg_teardown_job - routine to teardown a bsg job @@ -68,27 +156,9 @@ EXPORT_SYMBOL_GPL(bsg_job_get); void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) { - struct request *req = blk_mq_rq_from_pdu(job); - struct request *rsp = req->next_rq; - int err; - - err = job->sreq.result = result; - if (err < 0) - /* we're only returning the result field in the reply */ - job->sreq.sense_len = sizeof(u32); - else - job->sreq.sense_len = job->reply_len; - /* we assume all request payload was transferred, residual == 0 */ - job->sreq.resid_len = 0; - - if (rsp) { - WARN_ON(reply_payload_rcv_len > 
scsi_req(rsp)->resid_len); - - /* set reply (bidi) residual */ - scsi_req(rsp)->resid_len -= - min(reply_payload_rcv_len, scsi_req(rsp)->resid_len); - } - blk_complete_request(req); + job->result = result; + job->reply_payload_rcv_len = reply_payload_rcv_len; + blk_complete_request(blk_mq_rq_from_pdu(job)); } EXPORT_SYMBOL_GPL(bsg_job_done); @@ -113,7 +183,6 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) if (!buf->sg_list) return -ENOMEM; sg_init_table(buf->sg_list, req->nr_phys_segments); - scsi_req(req)->resid_len = blk_rq_bytes(req); buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); buf->payload_len = blk_rq_bytes(req); return 0; @@ -124,16 +193,13 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) * @dev: device that is being sent the bsg request * @req: BSG request that needs a job structure */ -static int bsg_prepare_job(struct device *dev, struct request *req) +static bool bsg_prepare_job(struct device *dev, struct request *req) { struct request *rsp = req->next_rq; - struct scsi_request *rq = scsi_req(req); struct bsg_job *job = blk_mq_rq_to_pdu(req); int ret; job->timeout = req->timeout; - job->request = rq->cmd; - job->request_len = rq->cmd_len; if (req->bio) { ret = bsg_map_buffer(&job->request_payload, req); @@ -149,12 +215,13 @@ static int bsg_prepare_job(struct device *dev, struct request *req) /* take a reference for the request */ get_device(job->dev); kref_init(&job->kref); - return 0; + return true; failjob_rls_rqst_payload: kfree(job->request_payload.sg_list); failjob_rls_job: - return -ENOMEM; + job->result = -ENOMEM; + return false; } /** @@ -183,9 +250,7 @@ static void bsg_request_fn(struct request_queue *q) break; spin_unlock_irq(q->queue_lock); - ret = bsg_prepare_job(dev, req); - if (ret) { - scsi_req(req)->result = ret; + if (!bsg_prepare_job(dev, req)) { blk_end_request_all(req, BLK_STS_OK); spin_lock_irq(q->queue_lock); continue; @@ -202,46 +267,34 @@ static void bsg_request_fn(struct request_queue *q) spin_lock_irq(q->queue_lock); } +/* called right after the request is allocated for the request_queue */ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp) { struct bsg_job *job = blk_mq_rq_to_pdu(req); - struct scsi_request *sreq = &job->sreq; - - /* called right after the request is allocated for the request_queue */ - sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp); - if (!sreq->sense) + job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp); + if (!job->reply) return -ENOMEM; - return 0; } +/* called right before the request is given to the request_queue user */ static void bsg_initialize_rq(struct request *req) { struct bsg_job *job = blk_mq_rq_to_pdu(req); - struct scsi_request *sreq = &job->sreq; - void *sense = sreq->sense; - - /* called right before the request is given to the request_queue user */ + void *reply = job->reply; memset(job, 0, sizeof(*job)); - - scsi_req_init(sreq); - - sreq->sense = sense; - sreq->sense_len = SCSI_SENSE_BUFFERSIZE; - - job->reply = sense; - job->reply_len = sreq->sense_len; + job->reply = reply; + job->reply_len = SCSI_SENSE_BUFFERSIZE; job->dd_data = job + 1; } static void bsg_exit_rq(struct request_queue *q, struct request *req) { struct bsg_job *job = blk_mq_rq_to_pdu(req); - struct scsi_request *sreq = &job->sreq; - kfree(sreq->sense); + kfree(job->reply); } /** @@ -275,11 +328,10 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, q->queuedata = dev; q->bsg_job_fn = job_fn; blk_queue_flag_set(QUEUE_FLAG_BIDI, q); 
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); blk_queue_softirq_done(q, bsg_softirq_done); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); - ret = bsg_register_queue(q, dev, name, release); + ret = bsg_register_queue(q, dev, name, &bsg_transport_ops, release); if (ret) { printk(KERN_ERR "%s: bsg interface failed to " "initialize - register queue\n", dev->kobj.name); diff --git a/block/bsg.c b/block/bsg.c index 06dc96e1f670..defa06c11858 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -130,114 +130,120 @@ static inline struct hlist_head *bsg_dev_idx_hash(int index) return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; } -static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, - struct sg_io_v4 *hdr, struct bsg_device *bd, - fmode_t mode) +#define uptr64(val) ((void __user *)(uintptr_t)(val)) + +static int bsg_scsi_check_proto(struct sg_io_v4 *hdr) +{ + if (hdr->protocol != BSG_PROTOCOL_SCSI || + hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD) + return -EINVAL; + return 0; +} + +static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, + fmode_t mode) { - struct scsi_request *req = scsi_req(rq); + struct scsi_request *sreq = scsi_req(rq); - if (hdr->request_len > BLK_MAX_CDB) { - req->cmd = kzalloc(hdr->request_len, GFP_KERNEL); - if (!req->cmd) + sreq->cmd_len = hdr->request_len; + if (sreq->cmd_len > BLK_MAX_CDB) { + sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL); + if (!sreq->cmd) return -ENOMEM; } - if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request, - hdr->request_len)) + if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len)) return -EFAULT; - - if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { - if (blk_verify_command(req->cmd, mode)) - return -EPERM; - } else if (!capable(CAP_SYS_RAWIO)) + if (blk_verify_command(sreq->cmd, mode)) return -EPERM; - - /* - * fill in request structure - */ - req->cmd_len = hdr->request_len; - - rq->timeout = msecs_to_jiffies(hdr->timeout); - if (!rq->timeout) - rq->timeout = q->sg_timeout; - if (!rq->timeout) - rq->timeout = BLK_DEFAULT_SG_TIMEOUT; - if (rq->timeout < BLK_MIN_SG_TIMEOUT) - rq->timeout = BLK_MIN_SG_TIMEOUT; - return 0; } -/* - * Check if sg_io_v4 from user is allowed and valid - */ -static int -bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op) +static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr) { + struct scsi_request *sreq = scsi_req(rq); int ret = 0; - if (hdr->guard != 'Q') - return -EINVAL; + /* + * fill in all the output members + */ + hdr->device_status = sreq->result & 0xff; + hdr->transport_status = host_byte(sreq->result); + hdr->driver_status = driver_byte(sreq->result); + hdr->info = 0; + if (hdr->device_status || hdr->transport_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->response_len = 0; - switch (hdr->protocol) { - case BSG_PROTOCOL_SCSI: - switch (hdr->subprotocol) { - case BSG_SUB_PROTOCOL_SCSI_CMD: - case BSG_SUB_PROTOCOL_SCSI_TRANSPORT: - break; - default: - ret = -EINVAL; - } - break; - default: - ret = -EINVAL; + if (sreq->sense_len && hdr->response) { + int len = min_t(unsigned int, hdr->max_response_len, + sreq->sense_len); + + if (copy_to_user(uptr64(hdr->response), sreq->sense, len)) + ret = -EFAULT; + else + hdr->response_len = len; + } + + if (rq->next_rq) { + hdr->dout_resid = sreq->resid_len; + hdr->din_resid = scsi_req(rq->next_rq)->resid_len; + } else if (rq_data_dir(rq) == READ) { + hdr->din_resid = sreq->resid_len; + } else { + hdr->dout_resid = sreq->resid_len; } - *op = 
hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN; return ret; } -/* - * map sg_io_v4 to a request. - */ +static void bsg_scsi_free_rq(struct request *rq) +{ + scsi_req_free_cmd(scsi_req(rq)); +} + +static const struct bsg_ops bsg_scsi_ops = { + .check_proto = bsg_scsi_check_proto, + .fill_hdr = bsg_scsi_fill_hdr, + .complete_rq = bsg_scsi_complete_rq, + .free_rq = bsg_scsi_free_rq, +}; + static struct request * -bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode) +bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode) { - struct request_queue *q = bd->queue; struct request *rq, *next_rq = NULL; int ret; - unsigned int op, dxfer_len; - void __user *dxferp = NULL; - struct bsg_class_device *bcd = &q->bsg_dev; - /* if the LLD has been removed then the bsg_unregister_queue will - * eventually be called and the class_dev was freed, so we can no - * longer use this request_queue. Return no such address. - */ - if (!bcd->class_dev) + if (!q->bsg_dev.class_dev) return ERR_PTR(-ENXIO); - bsg_dbg(bd, "map hdr %llx/%u %llx/%u\n", - (unsigned long long) hdr->dout_xferp, - hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, - hdr->din_xfer_len); + if (hdr->guard != 'Q') + return ERR_PTR(-EINVAL); - ret = bsg_validate_sgv4_hdr(hdr, &op); + ret = q->bsg_dev.ops->check_proto(hdr); if (ret) return ERR_PTR(ret); - /* - * map scatter-gather elements separately and string them to request - */ - rq = blk_get_request(q, op, GFP_KERNEL); + rq = blk_get_request(q, hdr->dout_xfer_len ? + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, + GFP_KERNEL); if (IS_ERR(rq)) return rq; - ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, mode); + ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode); if (ret) goto out; - if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) { + rq->timeout = msecs_to_jiffies(hdr->timeout); + if (!rq->timeout) + rq->timeout = q->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + if (rq->timeout < BLK_MIN_SG_TIMEOUT) + rq->timeout = BLK_MIN_SG_TIMEOUT; + + if (hdr->dout_xfer_len && hdr->din_xfer_len) { if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { ret = -EOPNOTSUPP; goto out; @@ -246,42 +252,39 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode) next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL); if (IS_ERR(next_rq)) { ret = PTR_ERR(next_rq); - next_rq = NULL; goto out; } - rq->next_rq = next_rq; - dxferp = (void __user *)(unsigned long)hdr->din_xferp; - ret = blk_rq_map_user(q, next_rq, NULL, dxferp, + rq->next_rq = next_rq; + ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp), hdr->din_xfer_len, GFP_KERNEL); if (ret) - goto out; + goto out_free_nextrq; } if (hdr->dout_xfer_len) { - dxfer_len = hdr->dout_xfer_len; - dxferp = (void __user *)(unsigned long)hdr->dout_xferp; + ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp), + hdr->dout_xfer_len, GFP_KERNEL); } else if (hdr->din_xfer_len) { - dxfer_len = hdr->din_xfer_len; - dxferp = (void __user *)(unsigned long)hdr->din_xferp; - } else - dxfer_len = 0; - - if (dxfer_len) { - ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len, - GFP_KERNEL); - if (ret) - goto out; + ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp), + hdr->din_xfer_len, GFP_KERNEL); + } else { + ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL); } + if (ret) + goto out_unmap_nextrq; return rq; + +out_unmap_nextrq: + if (rq->next_rq) + blk_rq_unmap_user(rq->next_rq->bio); +out_free_nextrq: + if (rq->next_rq) + blk_put_request(rq->next_rq); out: - 
scsi_req_free_cmd(scsi_req(rq)); + q->bsg_dev.ops->free_rq(rq); blk_put_request(rq); - if (next_rq) { - blk_rq_unmap_user(next_rq->bio); - blk_put_request(next_rq); - } return ERR_PTR(ret); } @@ -383,56 +386,18 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, struct bio *bio, struct bio *bidi_bio) { - struct scsi_request *req = scsi_req(rq); - int ret = 0; - - pr_debug("rq %p bio %p 0x%x\n", rq, bio, req->result); - /* - * fill in all the output members - */ - hdr->device_status = req->result & 0xff; - hdr->transport_status = host_byte(req->result); - hdr->driver_status = driver_byte(req->result); - hdr->info = 0; - if (hdr->device_status || hdr->transport_status || hdr->driver_status) - hdr->info |= SG_INFO_CHECK; - hdr->response_len = 0; - - if (req->sense_len && hdr->response) { - int len = min_t(unsigned int, hdr->max_response_len, - req->sense_len); + int ret; - ret = copy_to_user((void __user *)(unsigned long)hdr->response, - req->sense, len); - if (!ret) - hdr->response_len = len; - else - ret = -EFAULT; - } + ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr); if (rq->next_rq) { - hdr->dout_resid = req->resid_len; - hdr->din_resid = scsi_req(rq->next_rq)->resid_len; blk_rq_unmap_user(bidi_bio); blk_put_request(rq->next_rq); - } else if (rq_data_dir(rq) == READ) - hdr->din_resid = req->resid_len; - else - hdr->dout_resid = req->resid_len; - - /* - * If the request generated a negative error number, return it - * (providing we aren't already returning an error); if it's - * just a protocol response (i.e. non negative), that gets - * processed above. - */ - if (!ret && req->result < 0) - ret = req->result; + } blk_rq_unmap_user(bio); - scsi_req_free_cmd(req); + rq->q->bsg_dev.ops->free_rq(rq); blk_put_request(rq); - return ret; } @@ -614,7 +579,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf, /* * get a request, fill in the blanks, and add to request queue */ - rq = bsg_map_hdr(bd, &bc->hdr, mode); + rq = bsg_map_hdr(bd->queue, &bc->hdr, mode); if (IS_ERR(rq)) { ret = PTR_ERR(rq); rq = NULL; @@ -742,11 +707,6 @@ static struct bsg_device *bsg_add_device(struct inode *inode, struct bsg_device *bd; unsigned char buf[32]; - if (!blk_queue_scsi_passthrough(rq)) { - WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); - return ERR_PTR(-EINVAL); - } - if (!blk_get_queue(rq)) return ERR_PTR(-ENXIO); @@ -907,7 +867,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (copy_from_user(&hdr, uarg, sizeof(hdr))) return -EFAULT; - rq = bsg_map_hdr(bd, &hdr, file->f_mode); + rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -959,7 +919,8 @@ void bsg_unregister_queue(struct request_queue *q) EXPORT_SYMBOL_GPL(bsg_unregister_queue); int bsg_register_queue(struct request_queue *q, struct device *parent, - const char *name, void (*release)(struct device *)) + const char *name, const struct bsg_ops *ops, + void (*release)(struct device *)) { struct bsg_class_device *bcd; dev_t dev; @@ -996,6 +957,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent, bcd->queue = q; bcd->parent = get_device(parent); bcd->release = release; + bcd->ops = ops; kref_init(&bcd->ref); dev = MKDEV(bsg_major, bcd->minor); class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname); @@ -1023,7 +985,17 @@ unlock: mutex_unlock(&bsg_mutex); return ret; } -EXPORT_SYMBOL_GPL(bsg_register_queue); + +int 
bsg_scsi_register_queue(struct request_queue *q, struct device *parent) +{ + if (!blk_queue_scsi_passthrough(q)) { + WARN_ONCE(true, "Attempt to register a non-SCSI queue\n"); + return -EINVAL; + } + + return bsg_register_queue(q, parent, NULL, &bsg_scsi_ops, NULL); +} +EXPORT_SYMBOL_GPL(bsg_scsi_register_queue); static struct cdev bsg_cdev; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 538152f3528e..37c1d63e847e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2140,8 +2140,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) { struct device *dev = shost->dma_dev; - blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); - /* * this limit is imposed by hardware restrictions */ @@ -2239,6 +2237,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev) } __scsi_init_queue(shost, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); blk_queue_prep_rq(q, scsi_prep_fn); blk_queue_unprep_rq(q, scsi_unprep_fn); blk_queue_softirq_done(q, scsi_softirq_done); @@ -2270,6 +2269,7 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev) sdev->request_queue->queuedata = sdev; __scsi_init_queue(sdev->host, sdev->request_queue); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue); return sdev->request_queue; } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 91b90f672d23..7142c8be1099 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1292,8 +1292,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) transport_add_device(&sdev->sdev_gendev); sdev->is_visible = 1; - error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); - + error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev); if (error) /* we're treating error on bsg register as non-fatal, * so pretend nothing went wrong */ diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 7c0987616684..08acbabfae07 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -228,7 +228,6 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); blk_queue_flag_set(QUEUE_FLAG_BIDI, q); - blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); return 0; } diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 08762d297cbd..28a7ccc55c89 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -38,7 +38,6 @@ struct bsg_buffer { }; struct bsg_job { - struct scsi_request sreq; struct device *dev; struct kref kref; @@ -64,6 +63,9 @@ struct bsg_job { struct bsg_buffer request_payload; struct bsg_buffer reply_payload; + int result; + unsigned int reply_payload_rcv_len; + void *dd_data; /* Used for driver-specific storage */ }; diff --git a/include/linux/bsg.h b/include/linux/bsg.h index 2a202e41a3af..0c7dd9ceb139 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -1,34 +1,43 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef BSG_H -#define BSG_H +#ifndef _LINUX_BSG_H +#define _LINUX_BSG_H #include +struct request; + +#ifdef CONFIG_BLK_DEV_BSG +struct bsg_ops { + int (*check_proto)(struct sg_io_v4 *hdr); + int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, + fmode_t mode); + int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); + void (*free_rq)(struct request *rq); +}; -#if defined(CONFIG_BLK_DEV_BSG) struct bsg_class_device { struct device *class_dev; struct device *parent; int minor; struct request_queue *queue; 
struct kref ref; + const struct bsg_ops *ops; void (*release)(struct device *); }; -extern int bsg_register_queue(struct request_queue *q, - struct device *parent, const char *name, - void (*release)(struct device *)); -extern void bsg_unregister_queue(struct request_queue *); +int bsg_register_queue(struct request_queue *q, struct device *parent, + const char *name, const struct bsg_ops *ops, + void (*release)(struct device *)); +int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); +void bsg_unregister_queue(struct request_queue *q); #else -static inline int bsg_register_queue(struct request_queue *q, - struct device *parent, const char *name, - void (*release)(struct device *)) +static inline int bsg_scsi_register_queue(struct request_queue *q, + struct device *parent) { return 0; } static inline void bsg_unregister_queue(struct request_queue *q) { } -#endif - -#endif +#endif /* CONFIG_BLK_DEV_BSG */ +#endif /* _LINUX_BSG_H */ -- cgit v1.2.3 From 5f990d316085aca11b04dc0f63d6df5e508d73c7 Mon Sep 17 00:00:00 2001 From: Jonas Rabenstein Date: Wed, 7 Mar 2018 17:55:56 +0100 Subject: block: sed-opal: fix u64 short atom length The length must be given in bytes and not in 4-bit tuples. Reviewed-by: Scott Bauer Signed-off-by: Jonas Rabenstein Signed-off-by: Jens Axboe --- block/sed-opal.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/block/sed-opal.c b/block/sed-opal.c index 36842bfa572e..38411c5c477f 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -554,15 +554,14 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number) size_t len; int msb; - u8 n; if (!(number & ~TINY_ATOM_DATA_MASK)) { add_token_u8(err, cmd, number); return; } - msb = fls(number); - len = DIV_ROUND_UP(msb, 4); + msb = fls64(number); + len = DIV_ROUND_UP(msb, 8); if (cmd->pos >= IO_BUFFER_LENGTH - len - 1) { pr_debug("Error adding u64: end of buffer.\n"); @@ -570,10 +569,8 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number) return; } add_short_atom_header(cmd, false, false, len); - while (len--) { - n = number >> (len * 8); - add_token_u8(err, cmd, n); - } + while (len--) + add_token_u8(err, cmd, number >> (len * 8)); } static void add_token_bytestring(int *err, struct opal_dev *cmd, -- cgit v1.2.3 From 4c6994806f708559c2812b73501406e21ae5dcd0 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 16 Mar 2018 14:51:27 +0800 Subject: blk-throttle: fix race between blkcg_bio_issue_check() and cgroup_rmdir() We've triggered a WARNING in blk_throtl_bio() when throttling writeback I/O: the warning complains that blkg->refcnt is already 0 when blkg_get() is called, and the kernel then crashes with an invalid page request. After investigating this issue, we've found that it is caused by a race between blkcg_bio_issue_check() and cgroup_rmdir(), which is described below:

writeback kworker               cgroup_rmdir
                                  cgroup_destroy_locked
                                    kill_css
                                      css_killed_ref_fn
                                        css_killed_work_fn
                                          offline_css
                                            blkcg_css_offline
  blkcg_bio_issue_check
    rcu_read_lock
    blkg_lookup
                                              spin_trylock(q->queue_lock)
                                              blkg_destroy
                                              spin_unlock(q->queue_lock)
  blk_throtl_bio
    spin_lock_irq(q->queue_lock)
    ...
    spin_unlock_irq(q->queue_lock)
    rcu_read_unlock

Since RCU can only prevent the blkg from being freed while it is being referenced, blkg->refcnt can drop to 0 during blkg_destroy(), which schedules the blkg release. Calling blkg_get() in blk_throtl_bio() afterwards then triggers the WARNING, and the corresponding blkg_put() schedules the blkg release a second time, which results in a double free.
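Condensed to its refcounting core, the broken sequence looks like this userspace sketch (illustrative only: blkg_get_u() and blkg_put_u() are invented stand-ins for blkg_get() and blkg_put()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt = 1;	/* stands in for blkg->refcnt */

static void blkg_get_u(void)
{
	/* like a kref get: taking a reference on a zero count is a bug */
	if (atomic_fetch_add(&refcnt, 1) == 0)
		fprintf(stderr, "WARNING: get on refcnt == 0 (use after free)\n");
}

static void blkg_put_u(void)
{
	if (atomic_fetch_sub(&refcnt, 1) == 1)
		puts("release scheduled");	/* printed twice = double free */
}

int main(void)
{
	blkg_put_u();	/* blkg_destroy() drops the last reference */
	blkg_get_u();	/* racing blk_throtl_bio() resurrects it: WARNING */
	blkg_put_u();	/* its matching put schedules the release again */
	return 0;
}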
This race was introduced by commit ae1188963611 ("blkcg: consolidate blkg creation in blkcg_bio_issue_check()"). Before this commit, the code would do a lookup first and then retry the lookup/creation with the queue_lock held. Since reviving this logic would be a bit drastic, fix it by only offlining the policy data (pd) during blkcg_css_offline() and moving the rest of the destruction (especially blkg_put()) into blkcg_css_free(), which should be the right way as discussed. Fixes: ae1188963611 ("blkcg: consolidate blkg creation in blkcg_bio_issue_check()") Reported-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 78 ++++++++++++++++++++++++++++++++++++---------- include/linux/blk-cgroup.h | 1 + 2 files changed, 63 insertions(+), 16 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index c2033a232a44..1c16694ae145 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -307,11 +307,28 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, } } +static void blkg_pd_offline(struct blkcg_gq *blkg) +{ + int i; + + lockdep_assert_held(blkg->q->queue_lock); + lockdep_assert_held(&blkg->blkcg->lock); + + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (blkg->pd[i] && !blkg->pd[i]->offline && + pol->pd_offline_fn) { + pol->pd_offline_fn(blkg->pd[i]); + blkg->pd[i]->offline = true; + } + } +} + static void blkg_destroy(struct blkcg_gq *blkg) { struct blkcg *blkcg = blkg->blkcg; struct blkcg_gq *parent = blkg->parent; - int i; lockdep_assert_held(blkg->q->queue_lock); lockdep_assert_held(&blkcg->lock); @@ -320,13 +337,6 @@ static void blkg_destroy(struct blkcg_gq *blkg) WARN_ON_ONCE(list_empty(&blkg->q_node)); WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); - for (i = 0; i < BLKCG_MAX_POLS; i++) { - struct blkcg_policy *pol = blkcg_policy[i]; - - if (blkg->pd[i] && pol->pd_offline_fn) - pol->pd_offline_fn(blkg->pd[i]); - } - if (parent) { blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios); @@ -369,6 +379,7 @@ static void blkg_destroy_all(struct request_queue *q) struct blkcg *blkcg = blkg->blkcg; spin_lock(&blkcg->lock); + blkg_pd_offline(blkg); blkg_destroy(blkg); spin_unlock(&blkcg->lock); } @@ -995,25 +1006,25 @@ static struct cftype blkcg_legacy_files[] = { * @css: css of interest * * This function is called when @css is about to go away and responsible - * for shooting down all blkgs associated with @css. blkgs should be - * removed while holding both q and blkcg locks. As blkcg lock is nested - * inside q lock, this function performs reverse double lock dancing. + * for offlining all blkgs pd and killing all wbs associated with @css. + * blkgs pd offline should be done while holding both q and blkcg locks. + * As blkcg lock is nested inside q lock, this function performs reverse + * double lock dancing. * * This is the blkcg counterpart of ioc_release_fn().
*/ static void blkcg_css_offline(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); + struct blkcg_gq *blkg; spin_lock_irq(&blkcg->lock); - while (!hlist_empty(&blkcg->blkg_list)) { - struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, - struct blkcg_gq, blkcg_node); + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { struct request_queue *q = blkg->q; if (spin_trylock(q->queue_lock)) { - blkg_destroy(blkg); + blkg_pd_offline(blkg); spin_unlock(q->queue_lock); } else { spin_unlock_irq(&blkcg->lock); @@ -1027,11 +1038,43 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css) wb_blkcg_offline(blkcg); } +/** + * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg + * @blkcg: blkcg of interest + * + * This function is called when blkcg css is about to free and responsible for + * destroying all blkgs associated with @blkcg. + * blkgs should be removed while holding both q and blkcg locks. As blkcg lock + * is nested inside q lock, this function performs reverse double lock dancing. + */ +static void blkcg_destroy_all_blkgs(struct blkcg *blkcg) +{ + spin_lock_irq(&blkcg->lock); + while (!hlist_empty(&blkcg->blkg_list)) { + struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, + struct blkcg_gq, + blkcg_node); + struct request_queue *q = blkg->q; + + if (spin_trylock(q->queue_lock)) { + blkg_destroy(blkg); + spin_unlock(q->queue_lock); + } else { + spin_unlock_irq(&blkcg->lock); + cpu_relax(); + spin_lock_irq(&blkcg->lock); + } + } + spin_unlock_irq(&blkcg->lock); +} + static void blkcg_css_free(struct cgroup_subsys_state *css) { struct blkcg *blkcg = css_to_blkcg(css); int i; + blkcg_destroy_all_blkgs(blkcg); + mutex_lock(&blkcg_pol_mutex); list_del(&blkcg->all_blkcgs_node); @@ -1371,8 +1414,11 @@ void blkcg_deactivate_policy(struct request_queue *q, spin_lock(&blkg->blkcg->lock); if (blkg->pd[pol->plid]) { - if (pol->pd_offline_fn) + if (!blkg->pd[pol->plid]->offline && + pol->pd_offline_fn) { pol->pd_offline_fn(blkg->pd[pol->plid]); + blkg->pd[pol->plid]->offline = true; + } pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; } diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 69bea82ebeb1..6c666fd7de3c 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -88,6 +88,7 @@ struct blkg_policy_data { /* the blkg and policy id this per-policy data belongs to */ struct blkcg_gq *blkg; int plid; + bool offline; }; /* -- cgit v1.2.3 From ec6dcf63c55c8b09b44fe5990082b07baf139c69 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 16 Mar 2018 10:31:11 -0700 Subject: blk-mq-debugfs: Show more request state information Since commit 634f9e4631a8 ("blk-mq: remove REQ_ATOM_COMPLETE usages from blk-mq") blk_rq_is_complete() only reports whether or not a request has completed for legacy queues. Hence modify the blk-mq-debugfs code such that it shows the blk-mq request state again. 
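The lookup pattern added here, a designated-initializer name array plus a bounds-checked accessor, is a common defensive idiom. A userspace sketch (illustrative only; note it rejects out-of-range values with >= so even the one-past-the-end index cannot read past the array):

#include <stdio.h>

enum mq_rq_state { MQ_RQ_IDLE, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE };

static const char *const state_name[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *rq_state_name(unsigned int state)
{
	if (state >= ARRAY_SIZE(state_name) || !state_name[state])
		return "(?)";	/* out of range or a gap in the array */
	return state_name[state];
}

int main(void)
{
	printf("%s %s\n", rq_state_name(MQ_RQ_IN_FLIGHT), rq_state_name(7));
	return 0;
}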
Fixes: 634f9e4631a8 ("blk-mq: remove REQ_ATOM_COMPLETE usages from blk-mq") Signed-off-by: Bart Van Assche Cc: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index bd21d5b9f65f..58b3b79cbe83 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -350,6 +350,20 @@ static const char *const rqf_name[] = { }; #undef RQF_NAME +static const char *const blk_mq_rq_state_name_array[] = { + [MQ_RQ_IDLE] = "idle", + [MQ_RQ_IN_FLIGHT] = "in_flight", + [MQ_RQ_COMPLETE] = "complete", +}; + +static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state) +{ + if (WARN_ON_ONCE((unsigned int)rq_state > + ARRAY_SIZE(blk_mq_rq_state_name_array))) + return "(?)"; + return blk_mq_rq_state_name_array[rq_state]; +} + int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) { const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; @@ -366,7 +380,7 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) seq_puts(m, ", .rq_flags="); blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name, ARRAY_SIZE(rqf_name)); - seq_printf(m, ", complete=%d", blk_rq_is_complete(rq)); + seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq))); seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag, rq->internal_tag); if (mq_ops->show_rq) -- cgit v1.2.3 From 233bde21aa43516baa013ef7ac33f3427056db3e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 14 Mar 2018 15:48:06 -0700 Subject: block: Move SECTOR_SIZE and SECTOR_SHIFT definitions into <linux/blkdev.h> It happens often while I'm preparing a patch for a block driver that I'm wondering: is a definition of SECTOR_SIZE and/or SECTOR_SHIFT available for this driver? Do I have to introduce definitions of these constants before I can use them? To avoid this confusion, move the existing definitions of SECTOR_SIZE and SECTOR_SHIFT into the <linux/blkdev.h> header file such that these become available for all block drivers. Make the SECTOR_SIZE definition in the uapi msdos_fs.h header file conditional to avoid that including that header file after <linux/blkdev.h> causes the compiler to complain about a SECTOR_SIZE redefinition. Note: the SECTOR_SIZE / SECTOR_SHIFT / SECTOR_BITS definitions have not been removed from uapi header files nor from NAND drivers in which these constants are used for a purpose other than converting block layer offsets and sizes into a number of sectors. Cc: David S. Miller Cc: Mike Snitzer Cc: Dan Williams Cc: Minchan Kim Cc: Nitin Gupta Reviewed-by: Sergey Senozhatsky Reviewed-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K.
Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- arch/xtensa/platforms/iss/simdisk.c | 1 - drivers/block/brd.c | 1 - drivers/block/null_blk.c | 2 -- drivers/block/rbd.c | 9 -------- drivers/block/zram/zram_drv.h | 1 - drivers/ide/ide-cd.c | 8 +++---- drivers/ide/ide-cd.h | 6 +----- drivers/nvdimm/nd.h | 1 - drivers/scsi/gdth.h | 3 --- include/linux/blkdev.h | 42 +++++++++++++++++++++++++++---------- include/linux/device-mapper.h | 2 -- include/linux/ide.h | 1 - include/uapi/linux/msdos_fs.h | 2 ++ 13 files changed, 38 insertions(+), 41 deletions(-) diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c index 1b6418407467..026211e7ab09 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -21,7 +21,6 @@ #include #define SIMDISK_MAJOR 240 -#define SECTOR_SHIFT 9 #define SIMDISK_MINORS 1 #define MAX_SIMDISK_COUNT 10 diff --git a/drivers/block/brd.c b/drivers/block/brd.c index deea78e485da..66cb0f857f64 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -24,7 +24,6 @@ #include -#define SECTOR_SHIFT 9 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 0517613afccb..a76553293a31 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -16,10 +16,8 @@ #include #include -#define SECTOR_SHIFT 9 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) -#define SECTOR_SIZE (1 << SECTOR_SHIFT) #define SECTOR_MASK (PAGE_SECTORS - 1) #define FREE_BATCH 16 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 0016170cde0a..1e03b04819c8 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -50,15 +50,6 @@ #define RBD_DEBUG /* Activate rbd_assert() calls */ -/* - * The basic unit of block I/O is a sector. It is interpreted in a - * number of contexts in Linux (blk, bio, genhd), but the default is - * universally 512 bytes. These symbols are just slightly more - * meaningful than the bare numbers they represent. - */ -#define SECTOR_SHIFT 9 -#define SECTOR_SIZE (1ULL << SECTOR_SHIFT) - /* * Increment the given counter and return its updated value. * If the counter is already 0 it will not be incremented. diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 31762db861e3..1e9bf65c0bfb 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -37,7 +37,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3; /*-- End of configurable params */ -#define SECTOR_SHIFT 9 #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT) #define ZRAM_LOGICAL_BLOCK_SHIFT 12 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 5613cc2d51fc..5a8e8e3c22cd 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -712,7 +712,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) struct request_queue *q = drive->queue; int write = rq_data_dir(rq) == WRITE; unsigned short sectors_per_frame = - queue_logical_block_size(q) >> SECTOR_BITS; + queue_logical_block_size(q) >> SECTOR_SHIFT; ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " "secs_per_frame: %u", @@ -919,7 +919,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, * end up being bogus. 
*/ blocklen = be32_to_cpu(capbuf.blocklen); - blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; + blocklen = (blocklen >> SECTOR_SHIFT) << SECTOR_SHIFT; switch (blocklen) { case 512: case 1024: @@ -935,7 +935,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, } *capacity = 1 + be32_to_cpu(capbuf.lba); - *sectors_per_frame = blocklen >> SECTOR_BITS; + *sectors_per_frame = blocklen >> SECTOR_SHIFT; ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu", *capacity, *sectors_per_frame); @@ -1012,7 +1012,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) drive->probed_capacity = toc->capacity * sectors_per_frame; blk_queue_logical_block_size(drive->queue, - sectors_per_frame << SECTOR_BITS); + sectors_per_frame << SECTOR_SHIFT); /* first read just the header, so we know how long the TOC is */ stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h index 264e822eba58..04f0f310a856 100644 --- a/drivers/ide/ide-cd.h +++ b/drivers/ide/ide-cd.h @@ -21,11 +21,7 @@ /************************************************************************/ -#define SECTOR_BITS 9 -#ifndef SECTOR_SIZE -#define SECTOR_SIZE (1 << SECTOR_BITS) -#endif -#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS) +#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_SHIFT) #define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32) /* Capabilities Page size including 8 bytes of Mode Page Header */ diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 8d6375ee0fda..184e070d50a2 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -29,7 +29,6 @@ enum { * BTT instance */ ND_MAX_LANES = 256, - SECTOR_SHIFT = 9, INT_LBASIZE_ALIGNMENT = 64, NVDIMM_IO_ATOMIC = 1, }; diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 95fc720c1b30..e6e5ccb1e0f3 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h @@ -178,9 +178,6 @@ #define MSG_SIZE 34 /* size of message structure */ #define MSG_REQUEST 0 /* async. event: message */ -/* cacheservice defines */ -#define SECTOR_SIZE 0x200 /* always 512 bytes per sec. */ - /* DPMEM constants */ #define DPMEM_MAGIC 0xC0FFEE11 #define IC_HEADER_BYTES 48 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 19eaf8d89368..9af3e0f430bc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1021,6 +1021,19 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) return bdev->bd_disk->queue; /* this is never NULL */ } +/* + * The basic unit of block I/O is a sector. It is used in a number of contexts + * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9 + * bytes. Variables of type sector_t represent an offset or size that is a + * multiple of 512 bytes. Hence these two constants. 
+ */ +#ifndef SECTOR_SHIFT +#define SECTOR_SHIFT 9 +#endif +#ifndef SECTOR_SIZE +#define SECTOR_SIZE (1 << SECTOR_SHIFT) +#endif + /* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request @@ -1048,12 +1061,12 @@ extern unsigned int blk_rq_err_bytes(const struct request *rq); static inline unsigned int blk_rq_sectors(const struct request *rq) { - return blk_rq_bytes(rq) >> 9; + return blk_rq_bytes(rq) >> SECTOR_SHIFT; } static inline unsigned int blk_rq_cur_sectors(const struct request *rq) { - return blk_rq_cur_bytes(rq) >> 9; + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; } static inline unsigned int blk_rq_zone_no(struct request *rq) @@ -1083,7 +1096,8 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) - return min(q->limits.max_discard_sectors, UINT_MAX >> 9); + return min(q->limits.max_discard_sectors, + UINT_MAX >> SECTOR_SHIFT); if (unlikely(op == REQ_OP_WRITE_SAME)) return q->limits.max_write_same_sectors; @@ -1395,16 +1409,21 @@ extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, static inline int sb_issue_discard(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) { - return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), - nr_blocks << (sb->s_blocksize_bits - 9), + return blkdev_issue_discard(sb->s_bdev, + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), gfp_mask, flags); } static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, sector_t nr_blocks, gfp_t gfp_mask) { return blkdev_issue_zeroout(sb->s_bdev, - block << (sb->s_blocksize_bits - 9), - nr_blocks << (sb->s_blocksize_bits - 9), + block << (sb->s_blocksize_bits - + SECTOR_SHIFT), + nr_blocks << (sb->s_blocksize_bits - + SECTOR_SHIFT), gfp_mask, 0); } @@ -1511,7 +1530,8 @@ static inline int queue_alignment_offset(struct request_queue *q) static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) { unsigned int granularity = max(lim->physical_block_size, lim->io_min); - unsigned int alignment = sector_div(sector, granularity >> 9) << 9; + unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT) + << SECTOR_SHIFT; return (granularity + lim->alignment_offset - alignment) % granularity; } @@ -1545,8 +1565,8 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return 0; /* Why are these in bytes, not sectors? */ - alignment = lim->discard_alignment >> 9; - granularity = lim->discard_granularity >> 9; + alignment = lim->discard_alignment >> SECTOR_SHIFT; + granularity = lim->discard_granularity >> SECTOR_SHIFT; if (!granularity) return 0; @@ -1557,7 +1577,7 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector offset = (granularity + alignment - offset) % granularity; /* Turn it back into bytes, gaah */ - return offset << 9; + return offset << SECTOR_SHIFT; } static inline int bdev_discard_alignment(struct block_device *bdev) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index da83f64952e7..4384433b50e7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -542,8 +542,6 @@ do { \ #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) -#define SECTOR_SHIFT 9 - /* * Definitions of return values from target end_io function. 
*/ diff --git a/include/linux/ide.h b/include/linux/ide.h index 771989d25ef8..0acfa62b1d44 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -165,7 +165,6 @@ struct ide_io_ports { */ #define PARTN_BITS 6 /* number of minor dev bits for partitions */ #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ -#define SECTOR_SIZE 512 /* * Timeouts for various operations: diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h index a45d0754102e..fde753735aba 100644 --- a/include/uapi/linux/msdos_fs.h +++ b/include/uapi/linux/msdos_fs.h @@ -10,7 +10,9 @@ * The MS-DOS filesystem constants/structures */ +#ifndef SECTOR_SIZE #define SECTOR_SIZE 512 /* sector size (bytes) */ +#endif #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ #define MSDOS_DPB (MSDOS_DPS) /* dir entries per block */ #define MSDOS_DPB_BITS 4 /* log2(MSDOS_DPB) */ -- cgit v1.2.3 From 52c5e62d4c4beecddc6e1b8045ce1d695fca1ba7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Mar 2018 16:56:53 +0100 Subject: block: bio_check_eod() needs to consider partitions bio_check_eod() should check partition size not the whole disk if bio->bi_partno is non-zero. Do this by moving the call to bio_check_eod() into blk_partition_remap(). Based on an earlier patch from Jiufei Xue. Fixes: 74d46992e0d9 ("block: replace bi_bdev with a gendisk pointer and partitions index") Reported-by: Jiufei Xue Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 93 ++++++++++++++++++++++++-------------------------------- 1 file changed, 40 insertions(+), 53 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 74c6283f4509..5e88c579e896 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2122,7 +2122,7 @@ out_unlock: return BLK_QC_T_NONE; } -static void handle_bad_sector(struct bio *bio) +static void handle_bad_sector(struct bio *bio, sector_t maxsector) { char b[BDEVNAME_SIZE]; @@ -2130,7 +2130,7 @@ static void handle_bad_sector(struct bio *bio) printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", bio_devname(bio, b), bio->bi_opf, (unsigned long long)bio_end_sector(bio), - (long long)get_capacity(bio->bi_disk)); + (long long)maxsector); } #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -2191,68 +2191,59 @@ static noinline int should_fail_bio(struct bio *bio) } ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO); +/* + * Check whether this bio extends beyond the end of the device or partition. + * This may well happen - the kernel calls bread() without checking the size of + * the device, e.g., when mounting a file system. + */ +static inline int bio_check_eod(struct bio *bio, sector_t maxsector) +{ + unsigned int nr_sectors = bio_sectors(bio); + + if (nr_sectors && maxsector && + (nr_sectors > maxsector || + bio->bi_iter.bi_sector > maxsector - nr_sectors)) { + handle_bad_sector(bio, maxsector); + return -EIO; + } + return 0; +} + /* * Remap block n of partition p to block n+start(p) of the disk. */ static inline int blk_partition_remap(struct bio *bio) { struct hd_struct *p; - int ret = 0; + int ret = -EIO; rcu_read_lock(); p = __disk_get_part(bio->bi_disk, bio->bi_partno); - if (unlikely(!p || should_fail_request(p, bio->bi_iter.bi_size) || - bio_check_ro(bio, p))) { - ret = -EIO; + if (unlikely(!p)) + goto out; + if (unlikely(should_fail_request(p, bio->bi_iter.bi_size))) + goto out; + if (unlikely(bio_check_ro(bio, p))) goto out; - } /* * Zone reset does not include bi_size so bio_sectors() is always 0. * Include a test for the reset op code and perform the remap if needed. 
*/ - if (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET) - goto out; - - bio->bi_iter.bi_sector += p->start_sect; - bio->bi_partno = 0; - trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), - bio->bi_iter.bi_sector - p->start_sect); - + if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) { + if (bio_check_eod(bio, part_nr_sects_read(p))) + goto out; + bio->bi_iter.bi_sector += p->start_sect; + bio->bi_partno = 0; + trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p), + bio->bi_iter.bi_sector - p->start_sect); + } + ret = 0; out: rcu_read_unlock(); return ret; } -/* - * Check whether this bio extends beyond the end of the device. - */ -static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) -{ - sector_t maxsector; - - if (!nr_sectors) - return 0; - - /* Test device or partition size, when known. */ - maxsector = get_capacity(bio->bi_disk); - if (maxsector) { - sector_t sector = bio->bi_iter.bi_sector; - - if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { - /* - * This may well happen - the kernel calls bread() - * without checking the size of the device, e.g., when - * mounting a device. - */ - handle_bad_sector(bio); - return 1; - } - } - - return 0; -} - static noinline_for_stack bool generic_make_request_checks(struct bio *bio) { @@ -2263,9 +2254,6 @@ generic_make_request_checks(struct bio *bio) might_sleep(); - if (bio_check_eod(bio, nr_sectors)) - goto end_io; - q = bio->bi_disk->queue; if (unlikely(!q)) { printk(KERN_ERR @@ -2285,17 +2273,16 @@ generic_make_request_checks(struct bio *bio) if (should_fail_bio(bio)) goto end_io; - if (!bio->bi_partno) { - if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0))) + if (bio->bi_partno) { + if (unlikely(blk_partition_remap(bio))) goto end_io; } else { - if (blk_partition_remap(bio)) + if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0))) + goto end_io; + if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk)))) goto end_io; } - if (bio_check_eod(bio, nr_sectors)) - goto end_io; - /* * Filter flush bio's early so that make_request based * drivers without flush support don't have to worry -- cgit v1.2.3 From 804f3c6981f5e4a506a8f14dc284cb218d0659ae Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:14 -0700 Subject: bcache: fix cached_dev->count usage for bch_cache_set_error() When bcache metadata I/O fails, bcache will call bch_cache_set_error() to retire the whole cache set. The expected behavior to retire a cache set is to unregister the cache set, unregister all backing devices attached to this cache set, then remove the sysfs entries of the cache set and all attached backing devices, and finally release the memory of structs cache_set, cache, cached_dev and bcache_device. In my testing, when a journal I/O failure is triggered by a disconnected cache device, sometimes the cache set cannot be retired, and its sysfs entry /sys/fs/bcache/ still exists and the backing device also references it. This is not expected behavior.
When metadata I/O fails, the call sequence to retire the whole cache set is, bch_cache_set_error() bch_cache_set_unregister() bch_cache_set_stop() __cache_set_unregister() <- called as callback by calling closure_queue(&c->caching) cache_set_flush() <- called as a callback when refcount of cache_set->caching is 0 cache_set_free() <- called as a callback when refcount of cache_set->cl is 0 bch_cache_set_release() <- called as a callback when refcount of cache_set->kobj is 0 I find that if the kernel thread bch_writeback_thread() quits its while-loop when kthread_should_stop() is true and searched_full_index is false, the closure callback cache_set_flush() set by continue_at() will never be called. The result is, bcache fails to retire the whole cache set. cache_set_flush() will be called when the refcount of closure c->caching is 0, and in function bcache_device_detach() the refcount of closure c->caching is released to 0 by closure_put(). In the metadata error code path, function bcache_device_detach() is called by cached_dev_detach_finish(). This is a callback routine being called when cached_dev->count is 0. This refcount is decreased by cached_dev_put(). The above dependence indicates that cache_set_flush() will be called when the refcount of cache_set->cl is 0, and the refcount of cache_set->cl reaches 0 when the refcount of cached_dev->count is 0. The reason why sometimes cached_dev->count is not 0 (when metadata I/O fails and bch_cache_set_error() is called) is that, in bch_writeback_thread(), the refcount of cached_dev is not decreased properly. In bch_writeback_thread(), cached_dev_put() is called only when searched_full_index is true and cached_dev->writeback_keys is empty, a.k.a. there is no dirty data on the cache. Most of the run time this is correct, but when bch_writeback_thread() quits the while-loop while the cache is still dirty, the current code forgets to call cached_dev_put() before this kernel thread exits. This is why sometimes cache_set_flush() is not executed and the cache set fails to be retired. The reason to call cached_dev_put() in bch_writeback_rate() is that, when the cache device changes from clean to dirty, cached_dev_get() is called, to make sure that during writeback operations both the backing and cache devices won't be released. Adding the following code in bch_writeback_thread() does not work, static int bch_writeback_thread(void *arg) } + if (atomic_read(&dc->has_dirty)) + cached_dev_put() + return 0; } because the writeback kernel thread can be woken up and started via the sysfs entry: echo 1 > /sys/block/bcache/bcache/writeback_running It is difficult to check whether the backing device is dirty without races and extra locks. So the above modification would introduce a potential refcount underflow under some conditions. The correct fix is to take the cached dev refcount when creating the kernel thread, and put it before the kernel thread exits. Then bcache does not need to take a cached dev refcount when the cache turns from clean to dirty, or to put a cached dev refcount when the cache turns from dirty to clean. The writeback kernel thread is always safe to reference data structures from the cache set, cache and cached device (because a refcount of the cache device is taken for it already), and no matter whether the kernel thread is stopped by I/O errors or a system reboot, cached_dev->count can always be used correctly. The patch is simple, but understanding how it works is quite complicated. Changelog: v2: set dc->writeback_thread to NULL in this patch, as suggested by Hannes. v1: initial version for review.
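As a rough illustration of the ownership rule this fix applies (take the refcount once when the kthread is created, drop it exactly once when the thread exits, independent of any clean/dirty transitions), here is a hedged sketch in plain kernel C; my_dev and the helper names are hypothetical stand-ins, not the actual bcache code:

#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/sched.h>

struct my_dev {
	refcount_t count;		/* lifetime refcount, like dc->count */
	struct task_struct *worker;
};

static int my_worker(void *arg)
{
	struct my_dev *dev = arg;

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	/* drop the reference taken at creation time, exactly once */
	refcount_dec(&dev->count);
	return 0;
}

static int my_worker_start(struct my_dev *dev)
{
	refcount_inc(&dev->count);	/* pin the device for the thread */
	dev->worker = kthread_create(my_worker, dev, "my_worker");
	if (IS_ERR(dev->worker)) {
		refcount_dec(&dev->count);	/* undo on creation failure */
		return PTR_ERR(dev->worker);
	}
	wake_up_process(dev->worker);
	return 0;
}
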
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Michael Lyle Cc: Junhui Tang Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 1 - drivers/md/bcache/writeback.c | 11 ++++++++--- drivers/md/bcache/writeback.h | 2 -- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e8dfa804bd98..020be4f1cd8b 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1054,7 +1054,6 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { bch_sectors_dirty_init(&dc->disk); atomic_set(&dc->has_dirty, 1); - refcount_inc(&dc->count); bch_writeback_queue(dc); } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index f1d2fc15abcc..b280c134dd4d 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -572,7 +572,7 @@ static int bch_writeback_thread(void *arg) if (kthread_should_stop()) { set_current_state(TASK_RUNNING); - return 0; + break; } schedule(); @@ -585,7 +585,6 @@ static int bch_writeback_thread(void *arg) if (searched_full_index && RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { atomic_set(&dc->has_dirty, 0); - cached_dev_put(dc); SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); bch_write_bdev_super(dc, NULL); } @@ -606,6 +605,9 @@ static int bch_writeback_thread(void *arg) } } + dc->writeback_thread = NULL; + cached_dev_put(dc); + return 0; } @@ -669,10 +671,13 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc) if (!dc->writeback_write_wq) return -ENOMEM; + cached_dev_get(dc); dc->writeback_thread = kthread_create(bch_writeback_thread, dc, "bcache_writeback"); - if (IS_ERR(dc->writeback_thread)) + if (IS_ERR(dc->writeback_thread)) { + cached_dev_put(dc); return PTR_ERR(dc->writeback_thread); + } schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 587b25599856..0bba8f1c6cdf 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -105,8 +105,6 @@ static inline void bch_writeback_add(struct cached_dev *dc) { if (!atomic_read(&dc->has_dirty) && !atomic_xchg(&dc->has_dirty, 1)) { - refcount_inc(&dc->count); - if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) { SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY); /* XXX: should do this synchronously */ -- cgit v1.2.3 From fadd94e05c02afec7b70b0b14915624f1782f578 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:15 -0700 Subject: bcache: quit dc->writeback_thread when BCACHE_DEV_DETACHING is set In patch "bcache: fix cached_dev->count usage for bch_cache_set_error()", cached_dev_get() is called when creating dc->writeback_thread, and cached_dev_put() is called when exiting dc->writeback_thread. This modification works well unless people detach the bcache device manually by 'echo 1 > /sys/block/bcache/bcache/detach', because this sysfs interface only calls bch_cached_dev_detach(), which wakes up dc->writeback_thread but does not stop it. The reason is that, before the patch "bcache: fix cached_dev->count usage for bch_cache_set_error()", inside bch_writeback_thread(), if the cache is not dirty after writeback, cached_dev_put() is called here. And in cached_dev_make_request(), when a new write request turns the cache from clean to dirty, cached_dev_get() is called there.
Since we don't operate dc->count in these locations, the refcount dc->count cannot be dropped after the cache becomes clean, and cached_dev_detach_finish() won't be called to detach the bcache device. This patch fixes the issue by checking whether BCACHE_DEV_DETACHING is set inside bch_writeback_thread(). If this bit is set and the cache is clean (no existing writeback_keys), break the while-loop, call cached_dev_put() and quit the writeback thread. Please note that if the cache is still dirty, even if BCACHE_DEV_DETACHING is set the writeback thread should continue to perform writeback; this is the original design of manual detach. It is safe to do the following check without locking; let me explain why, + if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && + (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { If the kernel thread does not sleep and continues to run because the conditions are not updated in time on the running CPU core, it just consumes more CPU cycles and does no harm. This should-sleep-but-run case is safe here. We just focus on the should-run-but-sleep condition, which means the writeback thread goes to sleep by mistake while it should continue to run. 1. First of all, no matter whether the writeback thread is hung or not, kthread_stop() from cached_dev_detach_finish() will wake it up and terminate it by making kthread_should_stop() return true. And in normal run time the bit at index BCACHE_DEV_DETACHING is always cleared, so the condition !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) is always true and can be treated as a constant. 2. If either of the following conditions is true, the writeback thread should go to sleep: "!atomic_read(&dc->has_dirty)" or "!dc->writeback_running". Each of them independently controls whether the writeback thread should sleep; let's analyse them one by one. 2.1 condition "!atomic_read(&dc->has_dirty)" If dc->has_dirty is set from 0 to 1 on another CPU core, bcache will call bch_writeback_queue() immediately, or call bch_writeback_add(), which indirectly calls bch_writeback_queue() too. In bch_writeback_queue(), wake_up_process(dc->writeback_thread) is called. It sets the writeback thread's task state to TASK_RUNNING, with an implicit memory barrier, and then tries to wake up the writeback thread. In the writeback thread, the task state is set to TASK_INTERRUPTIBLE before doing the condition check. If the other CPU core sets TASK_RUNNING after the writeback thread sets TASK_INTERRUPTIBLE, the writeback thread will be scheduled to run very soon because its state is no longer TASK_INTERRUPTIBLE. If the other CPU core sets TASK_RUNNING before the writeback thread sets TASK_INTERRUPTIBLE, the implicit memory barrier of wake_up_process() makes sure the modification of dc->has_dirty on the other CPU core is observed on the CPU core of the writeback thread. Therefore the condition check will correctly evaluate to false, and the writeback code continues without sleeping. 2.2 condition "!dc->writeback_running" dc->writeback_running can be changed via a sysfs file; every time it is modified, bch_writeback_queue() is always called afterwards. So the change is always observed on the CPU core of the writeback thread. If dc->writeback_running is changed from 0 to 1 on another CPU core, this condition check will observe the modification and allow the writeback thread to continue running without sleeping. Now we can see that, even without locking protection, the multiple-condition check is safe here; no deadlock or process hang will happen.
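The analysis above follows the canonical sleeper/waker pattern in the kernel. A generic sketch of that pattern (illustrative code, not the bcache functions) looks like this; wake_up_process() provides the barrier that pairs with set_current_state():

#include <linux/sched.h>

/* sleeper side, e.g. a writeback-style kthread; *cond is shared state */
static void sleeper_wait(bool *cond)
{
	set_current_state(TASK_INTERRUPTIBLE);
	if (!*cond)		/* condition checked after the state change, */
		schedule();	/* so a concurrent wake-up is not missed */
	__set_current_state(TASK_RUNNING);
}

/* waker side: update the condition first, then wake. wake_up_process()
 * implies a general memory barrier, so the sleeper observes *cond. */
static void waker_signal(bool *cond, struct task_struct *task)
{
	*cond = true;
	wake_up_process(task);
}
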
I composed a separate patch because the patch "bcache: fix cached_dev->count usage for bch_cache_set_error()" already got a "Reviewed-by:" from Hannes Reinecke. Also, this fix is not trivial and is worth a separate patch. Signed-off-by: Coly Li Reviewed-by: Michael Lyle Cc: Hannes Reinecke Cc: Huijun Tang Signed-off-by: Jens Axboe --- drivers/md/bcache/writeback.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index b280c134dd4d..4dbeaaa575bf 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -565,9 +565,15 @@ static int bch_writeback_thread(void *arg) while (!kthread_should_stop()) { down_write(&dc->writeback_lock); set_current_state(TASK_INTERRUPTIBLE); - if (!atomic_read(&dc->has_dirty) || - (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && - !dc->writeback_running)) { + /* + * If the bache device is detaching, skip here and continue + * to perform writeback. Otherwise, if no dirty data on cache, + * or there is dirty data on cache but writeback is disabled, + * the writeback thread should sleep here and wait for others + * to wake up it. + */ + if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && + (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { up_write(&dc->writeback_lock); if (kthread_should_stop()) { @@ -587,6 +593,14 @@ static int bch_writeback_thread(void *arg) atomic_set(&dc->has_dirty, 0); SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); bch_write_bdev_super(dc, NULL); + /* + * If bcache device is detaching via sysfs interface, + * writeback thread should stop after there is no dirty + * data on cache. BCACHE_DEV_DETACHING flag is set in + * bch_cached_dev_detach(). + */ + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) + break; } up_write(&dc->writeback_lock); -- cgit v1.2.3 From 3fd47bfe55b00d5ac7b0a44c9301c07be39b1082 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:16 -0700 Subject: bcache: stop dc->writeback_rate_update properly struct delayed_work writeback_rate_update in struct cached_dev is a delayed work that calls update_writeback_rate() periodically (the interval is defined by dc->writeback_rate_update_seconds). When a metadata I/O error happens on the cache device, the bcache error handling routine bch_cache_set_error() will call bch_cache_set_unregister() to retire the whole cache set. On the unregister code path, this delayed work is stopped by calling cancel_delayed_work_sync(&dc->writeback_rate_update). dc->writeback_rate_update is a special delayed work, different from the others in bcache. In its routine update_writeback_rate(), this delayed work re-arms itself. That means that even after cancel_delayed_work_sync() returns, this delayed work can still be executed several seconds later, as defined by dc->writeback_rate_update_seconds. The problem is, after cancel_delayed_work_sync() returns, the cache set unregister code path will continue and release the memory of struct cache_set. When the delayed work is then scheduled to run, __update_writeback_rate() will reference the already released cache_set memory and trigger a NULL pointer dereference fault. This patch introduces two more bcache device flags, - BCACHE_DEV_WB_RUNNING bit set: the bcache device is in writeback mode and running, and it is OK for dc->writeback_rate_update to re-arm itself. bit clear: the bcache device is trying to stop dc->writeback_rate_update; this delayed work should not re-arm itself and should quit.
- BCACHE_DEV_RATE_DW_RUNNING bit set: routine update_writeback_rate() is executing. bit clear: routine update_writeback_rate() has quit. This patch also adds a function cancel_writeback_rate_update_dwork() to wait for dc->writeback_rate_update to quit before cancelling it by calling cancel_delayed_work_sync(). To avoid a deadlock caused by dc->writeback_rate_update quitting unexpectedly, this function gives up after time_out seconds and calls cancel_delayed_work_sync() anyway. Here is how this patch uses the above pieces to stop the self re-arming delayed work properly. update_writeback_rate() sets BCACHE_DEV_RATE_DW_RUNNING at its beginning and clears BCACHE_DEV_RATE_DW_RUNNING at its end. Before calling cancel_writeback_rate_update_dwork(), clear the flag BCACHE_DEV_WB_RUNNING. Before calling cancel_delayed_work_sync(), wait until the flag BCACHE_DEV_RATE_DW_RUNNING is clear. So by the time cancel_delayed_work_sync() is called, dc->writeback_rate_update must either have re-armed itself already, or have quit after seeing BCACHE_DEV_WB_RUNNING cleared. In both cases the delayed work routine update_writeback_rate() won't be executed after cancel_delayed_work_sync() returns. Inside update_writeback_rate(), the flag BCACHE_DEV_WB_RUNNING is checked before calling schedule_delayed_work(). If this flag is cleared, it means someone is about to stop the delayed work. Because the flag BCACHE_DEV_RATE_DW_RUNNING is already set and cancel_delayed_work_sync() has to wait for this flag to be cleared, we don't need to worry about a race condition here. If update_writeback_rate() is scheduled to run after checking BCACHE_DEV_RATE_DW_RUNNING and before calling cancel_delayed_work_sync() in cancel_writeback_rate_update_dwork(), it is also safe, because at this moment BCACHE_DEV_WB_RUNNING has been cleared with a memory barrier. As mentioned previously, update_writeback_rate() will see that BCACHE_DEV_WB_RUNNING is clear and quit immediately. Because update_writeback_rate() has further dependencies on struct cache_set memory, dc->writeback_rate_update is not a simple self re-arming delayed work. After trying many different methods (e.g. holding dc->count, or using locks), this is the only way I could find that properly stops the dc->writeback_rate_update delayed work. Changelog: v3: change values of BCACHE_DEV_WB_RUNNING and BCACHE_DEV_RATE_DW_RUNNING to bit indexes, for test_bit(). v2: try to fix the race issue pointed out by Junhui.
v1: The initial version for review Signed-off-by: Coly Li Reviewed-by: Junhui Tang Reviewed-by: Michael Lyle Cc: Michael Lyle Cc: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 9 +++++---- drivers/md/bcache/super.c | 38 ++++++++++++++++++++++++++++++++++---- drivers/md/bcache/sysfs.c | 3 ++- drivers/md/bcache/writeback.c | 29 ++++++++++++++++++++++++++++- 4 files changed, 69 insertions(+), 10 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 12e5197f186c..b5ddb848cd31 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -258,10 +258,11 @@ struct bcache_device { struct gendisk *disk; unsigned long flags; -#define BCACHE_DEV_CLOSING 0 -#define BCACHE_DEV_DETACHING 1 -#define BCACHE_DEV_UNLINK_DONE 2 - +#define BCACHE_DEV_CLOSING 0 +#define BCACHE_DEV_DETACHING 1 +#define BCACHE_DEV_UNLINK_DONE 2 +#define BCACHE_DEV_WB_RUNNING 3 +#define BCACHE_DEV_RATE_DW_RUNNING 4 unsigned nr_stripes; unsigned stripe_size; atomic_t *stripe_sectors_dirty; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 020be4f1cd8b..e5be599338c5 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -899,6 +899,31 @@ void bch_cached_dev_run(struct cached_dev *dc) pr_debug("error creating sysfs link"); } +/* + * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed + * work dc->writeback_rate_update is running. Wait until the routine + * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to + * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out + * seconds, give up waiting here and continue to cancel it too. + */ +static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) +{ + int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; + + do { + if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, + &dc->disk.flags)) + break; + time_out--; + schedule_timeout_interruptible(1); + } while (time_out > 0); + + if (time_out == 0) + pr_warn("give up waiting for dc->writeback_write_update to quit"); + + cancel_delayed_work_sync(&dc->writeback_rate_update); +} + static void cached_dev_detach_finish(struct work_struct *w) { struct cached_dev *dc = container_of(w, struct cached_dev, detach); @@ -911,7 +936,9 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_lock(&bch_register_lock); - cancel_delayed_work_sync(&dc->writeback_rate_update); + if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + cancel_writeback_rate_update_dwork(dc); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) { kthread_stop(dc->writeback_thread); dc->writeback_thread = NULL; @@ -954,6 +981,7 @@ void bch_cached_dev_detach(struct cached_dev *dc) closure_get(&dc->disk.cl); bch_writeback_queue(dc); + cached_dev_put(dc); } @@ -1081,14 +1109,16 @@ static void cached_dev_free(struct closure *cl) { struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); - cancel_delayed_work_sync(&dc->writeback_rate_update); + mutex_lock(&bch_register_lock); + + if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + cancel_writeback_rate_update_dwork(dc); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) kthread_stop(dc->writeback_thread); if (dc->writeback_write_wq) destroy_workqueue(dc->writeback_write_wq); - mutex_lock(&bch_register_lock); - if (atomic_read(&dc->running)) bd_unlink_disk_holder(dc->bdev, dc->disk.disk); bcache_device_free(&dc->disk); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 78cd7bd50fdd..55673508628f 100644 --- 
a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -309,7 +309,8 @@ STORE(bch_cached_dev) bch_writeback_queue(dc); if (attr == &sysfs_writeback_percent) - schedule_delayed_work(&dc->writeback_rate_update, + if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); mutex_unlock(&bch_register_lock); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 4dbeaaa575bf..8f98ef1038d3 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -115,6 +115,21 @@ static void update_writeback_rate(struct work_struct *work) struct cached_dev, writeback_rate_update); + /* + * should check BCACHE_DEV_RATE_DW_RUNNING before calling + * cancel_delayed_work_sync(). + */ + set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); + /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ + smp_mb(); + + if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); + /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ + smp_mb(); + return; + } + down_read(&dc->writeback_lock); if (atomic_read(&dc->has_dirty) && @@ -123,8 +138,18 @@ static void update_writeback_rate(struct work_struct *work) up_read(&dc->writeback_lock); - schedule_delayed_work(&dc->writeback_rate_update, + if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); + } + + /* + * should check BCACHE_DEV_RATE_DW_RUNNING before calling + * cancel_delayed_work_sync(). + */ + clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); + /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ + smp_mb(); } static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) @@ -675,6 +700,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate_p_term_inverse = 40; dc->writeback_rate_i_term_inverse = 10000; + WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); } @@ -693,6 +719,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc) return PTR_ERR(dc->writeback_thread); } + WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); -- cgit v1.2.3 From 771f393e8ffc9b3066e4830ee5f7391b8e8874f1 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:17 -0700 Subject: bcache: add CACHE_SET_IO_DISABLE to struct cache_set flags When too many I/Os fail on the cache device, bch_cache_set_error() is called in the error handling code path to retire the whole problematic cache set. If new I/O requests continue to come in and take the refcount dc->count, the cache set won't be retired immediately, which is a problem. Furthermore, several kernel threads and self-armed kernel workers may still be running after bch_cache_set_error() is called. It needs to wait quite a while for them to stop, or they won't stop at all. They also prevent the cache set from being retired. The solution in this patch is to add a per-cache-set flag to disable I/O requests on this cache and all attached backing devices. Then incoming I/O requests can be rejected in *_make_request() before taking a refcount, and kernel threads and self-armed kernel workers can stop very quickly when the flag bit CACHE_SET_IO_DISABLE is set.
Because bcache also does internal I/Os for writeback, garbage collection, bucket allocation and journaling, this kind of I/O should be disabled after bch_cache_set_error() is called. So closure_bio_submit() is modified to check whether CACHE_SET_IO_DISABLE is set in cache_set->flags. If it is set, closure_bio_submit() will set bio->bi_status to BLK_STS_IOERR and return; generic_make_request() won't be called. A sysfs interface is also added to set or clear the CACHE_SET_IO_DISABLE bit in cache_set->flags, to disable or enable cache set I/O for debugging. It is helpful for triggering more corner case issues on a failed cache device. Changelog v4, add wait_for_kthread_stop(), and call it before the writeback and gc kernel threads exit. v3, change CACHE_SET_IO_DISABLE from 4 to 3, since it is a bit index. remove "bcache: " prefix when printing out kernel messages. v2, more changes by previous review, - Use CACHE_SET_IO_DISABLE of cache_set->flags, suggested by Junhui. - Check CACHE_SET_IO_DISABLE in bch_btree_gc() to stop a while-loop, this is reported and inspired by the original patch from Pavel Vazharov. v1, initial version. Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Junhui Tang Cc: Michael Lyle Cc: Pavel Vazharov Signed-off-by: Jens Axboe --- drivers/md/bcache/alloc.c | 3 ++- drivers/md/bcache/bcache.h | 33 +++++++++++++++++++++++++++++++++ drivers/md/bcache/btree.c | 11 ++++++++--- drivers/md/bcache/io.c | 2 +- drivers/md/bcache/journal.c | 4 ++-- drivers/md/bcache/request.c | 26 +++++++++++++++++++------- drivers/md/bcache/super.c | 6 +++++- drivers/md/bcache/sysfs.c | 18 ++++++++++++++++++ drivers/md/bcache/util.h | 6 ------ drivers/md/bcache/writeback.c | 37 ++++++++++++++++++++++++++++--------- 10 files changed, 116 insertions(+), 30 deletions(-) diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 458e1d38577d..004cc3cc6123 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -287,7 +287,8 @@ do { \ break; \ \ mutex_unlock(&(ca)->set->bucket_lock); \ - if (kthread_should_stop()) { \ + if (kthread_should_stop() || \ + test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \ set_current_state(TASK_RUNNING); \ return 0; \ } \ diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index b5ddb848cd31..8a0327581d62 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -188,6 +188,7 @@ #include #include #include +#include #include "bset.h" #include "util.h" @@ -475,10 +476,15 @@ struct gc_stat { * * CACHE_SET_RUNNING means all cache devices have been registered and journal * replay is complete. + * + * CACHE_SET_IO_DISABLE is set when bcache is stopping the whold cache set, all + * external and internal I/O should be denied when this flag is set. + * */ #define CACHE_SET_UNREGISTERING 0 #define CACHE_SET_STOPPING 1 #define CACHE_SET_RUNNING 2 +#define CACHE_SET_IO_DISABLE 3 struct cache_set { struct closure cl; @@ -868,6 +874,33 @@ static inline void wake_up_allocators(struct cache_set *c) wake_up_process(ca->alloc_thread); } +static inline void closure_bio_submit(struct cache_set *c, + struct bio *bio, + struct closure *cl) +{ + closure_get(cl); + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return; + } + generic_make_request(bio); +} + +/* + * Prevent the kthread exits directly, and make sure when kthread_stop() + * is called to stop a kthread, it is still alive.
If a kthread might be + * stopped by CACHE_SET_IO_DISABLE bit set, wait_for_kthread_stop() is + * necessary before the kthread returns. + */ +static inline void wait_for_kthread_stop(void) +{ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } +} + /* Forward declarations */ void bch_count_io_errors(struct cache *, blk_status_t, int, const char *); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index fad9fe8817eb..39cc8a549091 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1744,6 +1744,7 @@ static void bch_btree_gc(struct cache_set *c) btree_gc_start(c); + /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */ do { ret = btree_root(gc_root, c, &op, &writes, &stats); closure_sync(&writes); @@ -1751,7 +1752,7 @@ static void bch_btree_gc(struct cache_set *c) if (ret && ret != -EAGAIN) pr_warn("gc failed!"); - } while (ret); + } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); bch_btree_gc_finish(c); wake_up_allocators(c); @@ -1789,15 +1790,19 @@ static int bch_gc_thread(void *arg) while (1) { wait_event_interruptible(c->gc_wait, - kthread_should_stop() || gc_should_run(c)); + kthread_should_stop() || + test_bit(CACHE_SET_IO_DISABLE, &c->flags) || + gc_should_run(c)); - if (kthread_should_stop()) + if (kthread_should_stop() || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)) break; set_gc_sectors(c); bch_btree_gc(c); } + wait_for_kthread_stop(); return 0; } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index a783c5a41ff1..8013ecbcdbda 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -38,7 +38,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); b->submit_time_us = local_clock_us(); - closure_bio_submit(bio, bio->bi_private); + closure_bio_submit(c, bio, bio->bi_private); } void bch_submit_bbio(struct bio *bio, struct cache_set *c, diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 1b736b860739..c94085f400a4 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -62,7 +62,7 @@ reread: left = ca->sb.bucket_size - offset; bio_set_op_attrs(bio, REQ_OP_READ, 0); bch_bio_map(bio, data); - closure_bio_submit(bio, &cl); + closure_bio_submit(ca->set, bio, &cl); closure_sync(&cl); /* This function could be simpler now since we no longer write @@ -674,7 +674,7 @@ static void journal_write_unlocked(struct closure *cl) spin_unlock(&c->journal.lock); while ((bio = bio_list_pop(&list))) - closure_bio_submit(bio, cl); + closure_bio_submit(c, bio, cl); continue_at(cl, journal_write_done, NULL); } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 6422846b546e..7aca308bee5b 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -747,7 +747,7 @@ static void cached_dev_read_error(struct closure *cl) /* XXX: invalidate cache */ - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); } continue_at(cl, cached_dev_cache_miss_done, NULL); @@ -872,7 +872,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->cache_miss = miss; s->iop.bio = cache_bio; bio_get(cache_bio); - closure_bio_submit(cache_bio, &s->cl); + closure_bio_submit(s->iop.c, cache_bio, &s->cl); return ret; out_put: @@ -880,7 +880,7 @@ out_put: out_submit: miss->bi_end_io = request_endio; miss->bi_private = &s->cl; - closure_bio_submit(miss, &s->cl); + closure_bio_submit(s->iop.c, miss, &s->cl); return ret; } @@ -945,7 +945,7 
@@ static void cached_dev_write(struct cached_dev *dc, struct search *s) if ((bio_op(bio) != REQ_OP_DISCARD) || blk_queue_discard(bdev_get_queue(dc->bdev))) - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); } else if (s->iop.writeback) { bch_writeback_add(dc); s->iop.bio = bio; @@ -960,12 +960,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) flush->bi_private = cl; flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - closure_bio_submit(flush, cl); + closure_bio_submit(s->iop.c, flush, cl); } } else { s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); } closure_call(&s->iop.cl, bch_data_insert, NULL, cl); @@ -981,7 +981,7 @@ static void cached_dev_nodata(struct closure *cl) bch_journal_meta(s->iop.c, cl); /* If it's a flush, we send the flush to the backing device too */ - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); continue_at(cl, cached_dev_bio_complete, NULL); } @@ -996,6 +996,12 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, struct cached_dev *dc = container_of(d, struct cached_dev, disk); int rw = bio_data_dir(bio); + if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return BLK_QC_T_NONE; + } + atomic_set(&dc->backing_idle, 0); generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); @@ -1112,6 +1118,12 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, struct bcache_device *d = bio->bi_disk->private_data; int rw = bio_data_dir(bio); + if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return BLK_QC_T_NONE; + } + generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); s = search_alloc(bio, d); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e5be599338c5..dda4ccdd1360 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -521,7 +521,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op, bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); bch_bio_map(bio, ca->disk_buckets); - closure_bio_submit(bio, &ca->prio); + closure_bio_submit(ca->set, bio, &ca->prio); closure_sync(cl); } @@ -1350,6 +1350,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 
test_bit(CACHE_SET_STOPPING, &c->flags)) return false; + if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) + pr_warn("CACHE_SET_IO_DISABLE already set"); + /* XXX: we can be called from atomic context acquire_console_sem(); */ @@ -1585,6 +1588,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) c->congested_read_threshold_us = 2000; c->congested_write_threshold_us = 20000; c->error_limit = DEFAULT_IO_ERROR_LIMIT; + WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); return c; err: diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 55673508628f..a3a45de5626d 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -95,6 +95,7 @@ read_attribute(partial_stripes_expensive); rw_attribute(synchronous); rw_attribute(journal_delay_ms); +rw_attribute(io_disable); rw_attribute(discard); rw_attribute(running); rw_attribute(label); @@ -591,6 +592,8 @@ SHOW(__bch_cache_set) sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); + sysfs_printf(io_disable, "%i", + test_bit(CACHE_SET_IO_DISABLE, &c->flags)); if (attr == &sysfs_bset_tree_stats) return bch_bset_print_stats(c, buf); @@ -680,6 +683,20 @@ STORE(__bch_cache_set) if (attr == &sysfs_io_error_halflife) c->error_decay = strtoul_or_return(buf) / 88; + if (attr == &sysfs_io_disable) { + int v = strtoul_or_return(buf); + + if (v) { + if (test_and_set_bit(CACHE_SET_IO_DISABLE, + &c->flags)) + pr_warn("CACHE_SET_IO_DISABLE already set"); + } else { + if (!test_and_clear_bit(CACHE_SET_IO_DISABLE, + &c->flags)) + pr_warn("CACHE_SET_IO_DISABLE already cleared"); + } + } + sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); sysfs_strtoul(verify, c->verify); sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); @@ -765,6 +782,7 @@ static struct attribute *bch_cache_set_internal_files[] = { &sysfs_gc_always_rewrite, &sysfs_btree_shrinker_disabled, &sysfs_copy_gc_enabled, + &sysfs_io_disable, NULL }; KTYPE(bch_cache_set_internal); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index a6763db7f061..268024529edd 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -567,12 +567,6 @@ static inline sector_t bdev_sectors(struct block_device *bdev) return bdev->bd_inode->i_size >> 9; } -#define closure_bio_submit(bio, cl) \ -do { \ - closure_get(cl); \ - generic_make_request(bio); \ -} while (0) - uint64_t bch_crc64_update(uint64_t, const void *, size_t); uint64_t bch_crc64(const void *, size_t); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 8f98ef1038d3..70092ada68e6 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -114,6 +114,7 @@ static void update_writeback_rate(struct work_struct *work) struct cached_dev *dc = container_of(to_delayed_work(work), struct cached_dev, writeback_rate_update); + struct cache_set *c = dc->disk.c; /* * should check BCACHE_DEV_RATE_DW_RUNNING before calling @@ -123,7 +124,12 @@ static void update_writeback_rate(struct work_struct *work) /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ smp_mb(); - if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + /* + * CACHE_SET_IO_DISABLE might be set via sysfs interface, + * check it here too. 
+ */ + if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ smp_mb(); @@ -138,7 +144,12 @@ static void update_writeback_rate(struct work_struct *work) up_read(&dc->writeback_lock); - if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + /* + * CACHE_SET_IO_DISABLE might be set via sysfs interface, + * check it here too. + */ + if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) && + !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); } @@ -278,7 +289,7 @@ static void write_dirty(struct closure *cl) bio_set_dev(&io->bio, io->dc->bdev); io->bio.bi_end_io = dirty_endio; - closure_bio_submit(&io->bio, cl); + closure_bio_submit(io->dc->disk.c, &io->bio, cl); } atomic_set(&dc->writeback_sequence_next, next_sequence); @@ -304,7 +315,7 @@ static void read_dirty_submit(struct closure *cl) { struct dirty_io *io = container_of(cl, struct dirty_io, cl); - closure_bio_submit(&io->bio, cl); + closure_bio_submit(io->dc->disk.c, &io->bio, cl); continue_at(cl, write_dirty, io->dc->writeback_write_wq); } @@ -330,7 +341,9 @@ static void read_dirty(struct cached_dev *dc) next = bch_keybuf_next(&dc->writeback_keys); - while (!kthread_should_stop() && next) { + while (!kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && + next) { size = 0; nk = 0; @@ -427,7 +440,9 @@ static void read_dirty(struct cached_dev *dc) } } - while (!kthread_should_stop() && delay) { + while (!kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && + delay) { schedule_timeout_interruptible(delay); delay = writeback_delay(dc, 0); } @@ -583,11 +598,13 @@ static bool refill_dirty(struct cached_dev *dc) static int bch_writeback_thread(void *arg) { struct cached_dev *dc = arg; + struct cache_set *c = dc->disk.c; bool searched_full_index; bch_ratelimit_reset(&dc->writeback_rate); - while (!kthread_should_stop()) { + while (!kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { down_write(&dc->writeback_lock); set_current_state(TASK_INTERRUPTIBLE); /* @@ -601,7 +618,8 @@ static int bch_writeback_thread(void *arg) (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { up_write(&dc->writeback_lock); - if (kthread_should_stop()) { + if (kthread_should_stop() || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { set_current_state(TASK_RUNNING); break; } @@ -637,6 +655,7 @@ static int bch_writeback_thread(void *arg) while (delay && !kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &c->flags) && !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) delay = schedule_timeout_interruptible(delay); @@ -644,8 +663,8 @@ static int bch_writeback_thread(void *arg) } } - dc->writeback_thread = NULL; cached_dev_put(dc); + wait_for_kthread_stop(); return 0; } -- cgit v1.2.3 From 7e027ca4b534b6b99a7c0471e13ba075ffa3f482 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:18 -0700 Subject: bcache: add stop_when_cache_set_failed option to backing device When there are too many I/O errors on cache device, current bcache code will retire the whole cache set, and detach all bcache devices. But the detached bcache devices are not stopped, which is problematic when bcache is in writeback mode. 
If the retired cache set has dirty data for its backing devices, continued writes to the bcache device will go to the backing device directly. If the LBA of a write request has a dirty version cached on the cache device, then the next time the cache device is re-registered and the backing device re-attached to it, the stale dirty data on the cache device will be written to the backing device, overwriting the newer directly written data. This situation causes serious data corruption. But we cannot simply stop all attached bcache devices when the cache set is broken or disconnected. For example, consider using bcache to accelerate the performance of an email service. In such a workload, if the cache device is broken but no dirty data is lost, keeping the bcache device alive and permitting the email service to continue accessing user data might be a better response to the cache device failure. Nix points out the issue and provides the above example to explain why it might be necessary not to stop the bcache device for a broken cache device. Pavel Goran provides a brilliant suggestion to provide "always" and "auto" options for the per-cached-device sysfs file stop_when_cache_set_failed. If the cache set is retiring and the backing device has no dirty data on the cache, it should be safe to keep the bcache device alive. In this case, if stop_when_cache_set_failed is set to "auto", the device failure handling code will not stop this bcache device and will permit applications to access the backing device through an unattached bcache device. Changelog: [mlyle: edited to not break string constants across lines] v3: fix typos pointed out by Nix. v2: change option values of stop_when_cache_set_failed from 1/0 to "auto"/"always". v1: initial version, stop_when_cache_set_failed can be 0 (not stop) or 1 (always stop). Signed-off-by: Coly Li Reviewed-by: Michael Lyle Signed-off-by: Michael Lyle Cc: Nix Cc: Pavel Goran Cc: Junhui Tang Cc: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 9 ++++++ drivers/md/bcache/super.c | 78 ++++++++++++++++++++++++++++++++++++++++------ drivers/md/bcache/sysfs.c | 17 ++++++++++ 3 files changed, 94 insertions(+), 10 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 8a0327581d62..5e9f3610c6fd 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -288,6 +288,12 @@ struct io { sector_t last; }; +enum stop_on_failure { + BCH_CACHED_DEV_STOP_AUTO = 0, + BCH_CACHED_DEV_STOP_ALWAYS, + BCH_CACHED_DEV_STOP_MODE_MAX, +}; + struct cached_dev { struct list_head list; struct bcache_device disk; @@ -380,6 +386,8 @@ struct cached_dev { unsigned writeback_rate_i_term_inverse; unsigned writeback_rate_p_term_inverse; unsigned writeback_rate_minimum; + + enum stop_on_failure stop_when_cache_set_failed; }; enum alloc_reserve { @@ -939,6 +947,7 @@ void bch_write_bdev_super(struct cached_dev *, struct closure *); extern struct workqueue_struct *bcache_wq; extern const char * const bch_cache_modes[]; +extern const char * const bch_stop_on_failure_modes[]; extern struct mutex bch_register_lock; extern struct list_head bch_cache_sets; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index dda4ccdd1360..7b45160e9a22 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -47,6 +47,14 @@ const char * const bch_cache_modes[] = { NULL }; +/* Default is -1; we skip past it for stop_when_cache_set_failed */ +const char * const bch_stop_on_failure_modes[] = { + "default", + "auto", + "always", + NULL +}; + static struct kobject *bcache_kobj; struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets); @@ -1188,6 +1196,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) max(dc->disk.disk->queue->backing_dev_info->ra_pages, q->backing_dev_info->ra_pages); + /* default to auto */ + dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; + bch_cached_dev_request_init(dc); bch_cached_dev_writeback_init(dc); return 0; @@ -1464,25 +1475,72 @@ static void cache_set_flush(struct closure *cl) closure_return(cl); } +/* + * This function is only called when CACHE_SET_IO_DISABLE is set, which means + * cache set is unregistering due to too many I/O errors. In this condition, + * the bcache device might be stopped, it depends on stop_when_cache_set_failed + * value and whether the broken cache has dirty data: + * + * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device + * BCH_CACHED_STOP_AUTO 0 NO + * BCH_CACHED_STOP_AUTO 1 YES + * BCH_CACHED_DEV_STOP_ALWAYS 0 YES + * BCH_CACHED_DEV_STOP_ALWAYS 1 YES + * + * The expected behavior is, if stop_when_cache_set_failed is configured to + * "auto" via sysfs interface, the bcache device will not be stopped if the + * backing device is clean on the broken cache device. + */ +static void conditional_stop_bcache_device(struct cache_set *c, + struct bcache_device *d, + struct cached_dev *dc) +{ + if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { + pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.", + d->disk->disk_name, c->sb.set_uuid); + bcache_device_stop(d); + } else if (atomic_read(&dc->has_dirty)) { + /* + * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO + * and dc->has_dirty == 1 + */ + pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.", + d->disk->disk_name); + bcache_device_stop(d); + } else { + /* + * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO + * and dc->has_dirty == 0 + */ + pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.", + d->disk->disk_name); + } +} + static void __cache_set_unregister(struct closure *cl) { struct cache_set *c = container_of(cl, struct cache_set, caching); struct cached_dev *dc; + struct bcache_device *d; size_t i; mutex_lock(&bch_register_lock); - for (i = 0; i < c->devices_max_used; i++) - if (c->devices[i]) { - if (!UUID_FLASH_ONLY(&c->uuids[i]) && - test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { - dc = container_of(c->devices[i], - struct cached_dev, disk); - bch_cached_dev_detach(dc); - } else { - bcache_device_stop(c->devices[i]); - } + for (i = 0; i < c->devices_max_used; i++) { + d = c->devices[i]; + if (!d) + continue; + + if (!UUID_FLASH_ONLY(&c->uuids[i]) && + test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { + dc = container_of(d, struct cached_dev, disk); + bch_cached_dev_detach(dc); + if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) + conditional_stop_bcache_device(c, d, dc); + } else { + bcache_device_stop(d); } + } mutex_unlock(&bch_register_lock); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index a3a45de5626d..414129f7c49f 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -78,6 +78,7 @@ rw_attribute(congested_write_threshold_us); rw_attribute(sequential_cutoff); rw_attribute(data_csum); rw_attribute(cache_mode); +rw_attribute(stop_when_cache_set_failed); rw_attribute(writeback_metadata); rw_attribute(writeback_running); rw_attribute(writeback_percent); @@ -126,6 +127,12 @@ SHOW(__bch_cached_dev) bch_cache_modes + 1, 
BDEV_CACHE_MODE(&dc->sb)); + if (attr == &sysfs_stop_when_cache_set_failed) + return bch_snprint_string_list(buf, PAGE_SIZE, + bch_stop_on_failure_modes + 1, + dc->stop_when_cache_set_failed); + + sysfs_printf(data_csum, "%i", dc->disk.data_csum); var_printf(verify, "%i"); var_printf(bypass_torture_test, "%i"); @@ -247,6 +254,15 @@ STORE(__cached_dev) } } + if (attr == &sysfs_stop_when_cache_set_failed) { + v = bch_read_string_list(buf, bch_stop_on_failure_modes + 1); + + if (v < 0) + return v; + + dc->stop_when_cache_set_failed = v; + } + if (attr == &sysfs_label) { if (size > SB_LABEL_SIZE) return -EINVAL; @@ -326,6 +342,7 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_data_csum, #endif &sysfs_cache_mode, + &sysfs_stop_when_cache_set_failed, &sysfs_writeback_metadata, &sysfs_writeback_running, &sysfs_writeback_delay, -- cgit v1.2.3 From bc082a55d25c837341709accaf11311c3a9af727 Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:19 -0700 Subject: bcache: fix inaccurate io state for detached bcache devices When we run I/O on a detached device and run iostat to show the I/O status, it normally shows something like below (some fields omitted):

Device:  ... avgrq-sz avgqu-sz await r_await w_await svctm %util
sdd      ... 15.89    0.53     1.82  0.20    2.23    1.81  52.30
bcache0  ... 15.89    115.42   0.00  0.00    0.00    2.40  69.60

but after the I/O has stopped, there are still very big avgqu-sz and %util values, as below:

Device:  ... avgrq-sz avgqu-sz await r_await w_await svctm %util
bcache0  ... 0        5326.32  0.00  0.00    0.00    0.00  100.10

The reason for this issue is that only generic_start_io_acct() is called, and generic_end_io_acct() is never called, for a detached device in cached_dev_make_request(). See the code: //start generic_start_io_acct() generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); if (cached_dev_get(dc)) { //will callback generic_end_io_acct() } else { //will not call generic_end_io_acct() } This patch calls generic_end_io_acct() at the end of I/O for detached devices, so the I/O state is shown correctly. (Modified to use GFP_NOIO in kzalloc() by Coly Li) Changelog: v2: fix typo. v1: the initial version.
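The underlying rule the patch restores is that every generic_start_io_acct() must be paired with a generic_end_io_acct() when the bio completes; otherwise the in-flight counter never drops and %util sticks near 100%. A minimal sketch of that pairing follows (4.16-era accounting API; my_io_ctx, my_end_io and my_submit are illustrative names, not bcache code, and error handling is elided):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>

/* hypothetical per-bio context, like what the patch stores in ddip */
struct my_io_ctx {
	struct request_queue *q;
	struct hd_struct *part;
	unsigned long start_time;
	bio_end_io_t *orig_end_io;
	void *orig_private;
};

static void my_end_io(struct bio *bio)
{
	struct my_io_ctx *ctx = bio->bi_private;

	/* close the accounting window opened at submit time */
	generic_end_io_acct(ctx->q, bio_data_dir(bio), ctx->part,
			    ctx->start_time);
	bio->bi_end_io = ctx->orig_end_io;
	bio->bi_private = ctx->orig_private;
	kfree(ctx);
	bio->bi_end_io(bio);
}

static void my_submit(struct request_queue *q, struct hd_struct *part,
		      struct bio *bio)
{
	struct my_io_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOIO);

	generic_start_io_acct(q, bio_data_dir(bio), bio_sectors(bio), part);
	ctx->q = q;
	ctx->part = part;
	ctx->start_time = jiffies;
	ctx->orig_end_io = bio->bi_end_io;
	ctx->orig_private = bio->bi_private;
	bio->bi_end_io = my_end_io;		/* restore and account on completion */
	bio->bi_private = ctx;
	generic_make_request(bio);
}
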
Signed-off-by: Tang Junhui Reviewed-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/request.c | 58 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 7 deletions(-) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 7aca308bee5b..5c8ae69c8502 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -986,6 +986,55 @@ static void cached_dev_nodata(struct closure *cl) continue_at(cl, cached_dev_bio_complete, NULL); } +struct detached_dev_io_private { + struct bcache_device *d; + unsigned long start_time; + bio_end_io_t *bi_end_io; + void *bi_private; +}; + +static void detached_dev_end_io(struct bio *bio) +{ + struct detached_dev_io_private *ddip; + + ddip = bio->bi_private; + bio->bi_end_io = ddip->bi_end_io; + bio->bi_private = ddip->bi_private; + + generic_end_io_acct(ddip->d->disk->queue, + bio_data_dir(bio), + &ddip->d->disk->part0, ddip->start_time); + + kfree(ddip); + + bio->bi_end_io(bio); +} + +static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) +{ + struct detached_dev_io_private *ddip; + struct cached_dev *dc = container_of(d, struct cached_dev, disk); + + /* + * no need to call closure_get(&dc->disk.cl), + * because upper layer had already opened bcache device, + * which would call closure_get(&dc->disk.cl) + */ + ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); + ddip->d = d; + ddip->start_time = jiffies; + ddip->bi_end_io = bio->bi_end_io; + ddip->bi_private = bio->bi_private; + bio->bi_end_io = detached_dev_end_io; + bio->bi_private = ddip; + + if ((bio_op(bio) == REQ_OP_DISCARD) && + !blk_queue_discard(bdev_get_queue(dc->bdev))) + bio->bi_end_io(bio); + else + generic_make_request(bio); +} + /* Cached devices - read & write stuff */ static blk_qc_t cached_dev_make_request(struct request_queue *q, @@ -1028,13 +1077,8 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, else cached_dev_read(dc, s); } - } else { - if ((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(dc->bdev))) - bio_endio(bio); - else - generic_make_request(bio); - } + } else + detached_dev_do_request(d, bio); return BLK_QC_T_NONE; } -- cgit v1.2.3 From 688892b3bc05e25da94866e32210e5f503f16f69 Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:20 -0700 Subject: bcache: fix incorrect sysfs output value of stripe size Stripe size is shown as zero when there is no stripe in the backing device: [root@ceph132 ~]# cat /sys/block/sdd/bcache/stripe_size 0.0k Actually it should be 1 TB (1 << 31 sectors), but in the sysfs interface stripe_size is converted from sectors to bytes by shifting 9 bits left, so the 32-bit variable overflows. This patch casts the variable to a 64-bit type before shifting.
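The overflow is easy to reproduce in isolation; a minimal user-space demonstration, assuming a 32-bit unsigned int as on the platforms in question:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stripe_size = 1U << 31;	/* sectors */

	/* 32-bit shift wraps: (1 << 31) << 9 == 0 */
	printf("%llu\n", (unsigned long long)(stripe_size << 9));
	/* widen first, as the patch does: 1099511627776 (1 TB) */
	printf("%llu\n", (unsigned long long)((uint64_t)stripe_size << 9));
	return 0;
}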
Signed-off-by: Tang Junhui Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 414129f7c49f..8c3fd05db87a 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -181,7 +181,7 @@ SHOW(__bch_cached_dev) sysfs_hprint(dirty_data, bcache_dev_sectors_dirty(&dc->disk) << 9); - sysfs_hprint(stripe_size, dc->disk.stripe_size << 9); + sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9); var_printf(partial_stripes_expensive, "%u"); var_hprint(sequential_cutoff); -- cgit v1.2.3 From f3641c3abd1da978ee969b0203b71b86ec1bfa93 Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:21 -0700 Subject: bcache: fix error return value in memory shrink In bch_mca_scan(), the return value should not be the number of freed btree nodes, but the number of pages of freed btree nodes. Signed-off-by: Tang Junhui Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 39cc8a549091..b2d4899f48d5 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -719,7 +719,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, } out: mutex_unlock(&c->bucket_lock); - return freed; + return freed * c->btree_pages; } static unsigned long bch_mca_count(struct shrinker *shrink, -- cgit v1.2.3 From ca71df31661a0518ed58a1a59cf1993962153ebb Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:22 -0700 Subject: bcache: fix use of loop variables in memory shrink In bch_mca_scan(), there is some confusion and a logical error in the use of the loop variables. This patch clarifies them as: 1) nr: the number of btree nodes that still need to be scanned, which decreases after we scan a btree node and should not go below 0; 2) i: the number of btree nodes scanned so far, covering both btree_cache_freeable and btree_cache, which should not exceed btree_cache_used; 3) freed: the number of btree nodes freed.
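A hypothetical skeleton of the contract among the three counters (array-based for brevity; the real function walks two kernel lists, and try_free_node() below is an invented stand-in for mca_reap() plus the free):

/* try_free_node() is a hypothetical helper, not a kernel API */
extern bool try_free_node(unsigned long idx);

static unsigned long scan_cache(unsigned long nr_to_scan,
				unsigned long cache_used)
{
	unsigned long nr = nr_to_scan;	/* work left to do; stop at 0 */
	unsigned long i = 0;		/* nodes scanned so far */
	unsigned long freed = 0;	/* nodes actually freed */

	while (nr > 0 && i < cache_used) {
		if (try_free_node(i))
			freed++;
		i++;
		nr--;
	}
	return freed;	/* bch_mca_scan() then scales this to pages */
}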
Signed-off-by: Tang Junhui Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index b2d4899f48d5..d64aff0b8abc 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -665,6 +665,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, struct btree *b, *t; unsigned long i, nr = sc->nr_to_scan; unsigned long freed = 0; + unsigned int btree_cache_used; if (c->shrinker_disabled) return SHRINK_STOP; @@ -689,9 +690,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, nr = min_t(unsigned long, nr, mca_can_free(c)); i = 0; + btree_cache_used = c->btree_cache_used; list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { - if (freed >= nr) - break; + if (nr <= 0) + goto out; if (++i > 3 && !mca_reap(b, 0, false)) { @@ -699,9 +701,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, rw_unlock(true, b); freed++; } + nr--; } - for (i = 0; (nr--) && i < c->btree_cache_used; i++) { + for (; (nr--) && i < btree_cache_used; i++) { if (list_empty(&c->btree_cache)) goto out; -- cgit v1.2.3 From df2b94313ae5b4f60d49e01d4dff5acb4c2757cf Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Sun, 18 Mar 2018 17:36:23 -0700 Subject: bcache: move closure debug file into debug directory In the current code the closure debug file lives outside the debug directory, and the module unload path never removes it, so re-creating it fails when the module is reloaded. This patch moves the closure debug file into the "bcache" debug directory so that the file gets deleted properly. Signed-off-by: Chengguang Xu Reviewed-by: Michael Lyle Reviewed-by: Tang Junhui Signed-off-by: Jens Axboe --- drivers/md/bcache/closure.c | 9 +++++---- drivers/md/bcache/closure.h | 5 +++-- drivers/md/bcache/debug.c | 14 +++++++------- drivers/md/bcache/super.c | 3 +-- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 7f12920c14f7..c0949c9f843b 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -157,7 +157,7 @@ void closure_debug_destroy(struct closure *cl) } EXPORT_SYMBOL(closure_debug_destroy); -static struct dentry *debug; +static struct dentry *closure_debug; static int debug_seq_show(struct seq_file *f, void *data) { @@ -199,11 +199,12 @@ static const struct file_operations debug_ops = { .release = single_release }; -void __init closure_debug_init(void) +int __init closure_debug_init(void) { - debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops); + closure_debug = debugfs_create_file("closures", + 0400, bcache_debug, NULL, &debug_ops); + return IS_ERR_OR_NULL(closure_debug); } - #endif MODULE_AUTHOR("Kent Overstreet "); diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 3b9dfc9962ad..71427eb5fdae 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -105,6 +105,7 @@ struct closure; struct closure_syncer; typedef void (closure_fn) (struct closure *); +extern struct dentry *bcache_debug; struct closure_waitlist { struct llist_head list; @@ -185,13 +186,13 @@ static inline void closure_sync(struct closure *cl) #ifdef CONFIG_BCACHE_CLOSURES_DEBUG -void closure_debug_init(void); +int closure_debug_init(void); void closure_debug_create(struct closure *cl); void closure_debug_destroy(struct closure *cl); #else -static inline void
closure_debug_init(void) {} +static inline int closure_debug_init(void) { return 0; } static inline void closure_debug_create(struct closure *cl) {} static inline void closure_debug_destroy(struct closure *cl) {} diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index af89408befe8..028f7b386e01 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -17,7 +17,7 @@ #include #include -static struct dentry *debug; +struct dentry *bcache_debug; #ifdef CONFIG_BCACHE_DEBUG @@ -232,11 +232,11 @@ static const struct file_operations cache_set_debug_ops = { void bch_debug_init_cache_set(struct cache_set *c) { - if (!IS_ERR_OR_NULL(debug)) { + if (!IS_ERR_OR_NULL(bcache_debug)) { char name[50]; snprintf(name, 50, "bcache-%pU", c->sb.set_uuid); - c->debug = debugfs_create_file(name, 0400, debug, c, + c->debug = debugfs_create_file(name, 0400, bcache_debug, c, &cache_set_debug_ops); } } @@ -245,13 +245,13 @@ void bch_debug_init_cache_set(struct cache_set *c) void bch_debug_exit(void) { - if (!IS_ERR_OR_NULL(debug)) - debugfs_remove_recursive(debug); + if (!IS_ERR_OR_NULL(bcache_debug)) + debugfs_remove_recursive(bcache_debug); } int __init bch_debug_init(struct kobject *kobj) { - debug = debugfs_create_dir("bcache", NULL); + bcache_debug = debugfs_create_dir("bcache", NULL); - return IS_ERR_OR_NULL(debug); + return IS_ERR_OR_NULL(bcache_debug); } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 7b45160e9a22..f1f64853114b 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2224,7 +2224,6 @@ static int __init bcache_init(void) mutex_init(&bch_register_lock); init_waitqueue_head(&unregister_wait); register_reboot_notifier(&reboot); - closure_debug_init(); bcache_major = register_blkdev(0, "bcache"); if (bcache_major < 0) { @@ -2236,7 +2235,7 @@ static int __init bcache_init(void) if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || bch_request_init() || - bch_debug_init(bcache_kobj) || + bch_debug_init(bcache_kobj) || closure_debug_init() || sysfs_create_files(bcache_kobj, files)) goto err; -- cgit v1.2.3 From 27a40ab9269e79b55672312b324f8f29d94463d4 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:24 -0700 Subject: bcache: add backing_request_endio() for bi_end_io In order to catch I/O error of backing device, a separate bi_end_io call back is required. Then a per backing device counter can record I/O errors number and retire the backing device if the counter reaches a per backing device I/O error limit. This patch adds backing_request_endio() to bcache backing device I/O code path, this is a preparation for further complicated backing device failure handling. So far there is no real code logic change, I make this change a separate patch to make sure it is stable and reliable for further work. Changelog: v2: Fix code comments typo, remove a redundant bch_writeback_add() line added in v4 patch set. v1: indeed this is new added in this patch set. 
[mlyle: truncated commit subject] Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Junhui Tang Cc: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/request.c | 93 +++++++++++++++++++++++++++++++++++-------- drivers/md/bcache/super.c | 1 + drivers/md/bcache/writeback.c | 1 + 3 files changed, 79 insertions(+), 16 deletions(-) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 5c8ae69c8502..b4a5768afbe9 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -139,6 +139,7 @@ static void bch_data_invalidate(struct closure *cl) } op->insert_data_done = true; + /* get in bch_data_insert() */ bio_put(bio); out: continue_at(cl, bch_data_insert_keys, op->wq); @@ -630,6 +631,38 @@ static void request_endio(struct bio *bio) closure_put(cl); } +static void backing_request_endio(struct bio *bio) +{ + struct closure *cl = bio->bi_private; + + if (bio->bi_status) { + struct search *s = container_of(cl, struct search, cl); + /* + * If a bio has REQ_PREFLUSH for writeback mode, it is + * speically assembled in cached_dev_write() for a non-zero + * write request which has REQ_PREFLUSH. we don't set + * s->iop.status by this failure, the status will be decided + * by result of bch_data_insert() operation. + */ + if (unlikely(s->iop.writeback && + bio->bi_opf & REQ_PREFLUSH)) { + char buf[BDEVNAME_SIZE]; + + bio_devname(bio, buf); + pr_err("Can't flush %s: returned bi_status %i", + buf, bio->bi_status); + } else { + /* set to orig_bio->bi_status in bio_complete() */ + s->iop.status = bio->bi_status; + } + s->recoverable = false; + /* should count I/O error for backing device here */ + } + + bio_put(bio); + closure_put(cl); +} + static void bio_complete(struct search *s) { if (s->orig_bio) { @@ -644,13 +677,21 @@ static void bio_complete(struct search *s) } } -static void do_bio_hook(struct search *s, struct bio *orig_bio) +static void do_bio_hook(struct search *s, + struct bio *orig_bio, + bio_end_io_t *end_io_fn) { struct bio *bio = &s->bio.bio; bio_init(bio, NULL, 0); __bio_clone_fast(bio, orig_bio); - bio->bi_end_io = request_endio; + /* + * bi_end_io can be set separately somewhere else, e.g. 
the + * variants in, + * - cache_bio->bi_end_io from cached_dev_cache_miss() + * - n->bi_end_io from cache_lookup_fn() + */ + bio->bi_end_io = end_io_fn; bio->bi_private = &s->cl; bio_cnt_set(bio, 3); @@ -676,7 +717,7 @@ static inline struct search *search_alloc(struct bio *bio, s = mempool_alloc(d->c->search, GFP_NOIO); closure_init(&s->cl, NULL); - do_bio_hook(s, bio); + do_bio_hook(s, bio, request_endio); s->orig_bio = bio; s->cache_miss = NULL; @@ -743,10 +784,11 @@ static void cached_dev_read_error(struct closure *cl) trace_bcache_read_retry(s->orig_bio); s->iop.status = 0; - do_bio_hook(s, s->orig_bio); + do_bio_hook(s, s->orig_bio, backing_request_endio); /* XXX: invalidate cache */ + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, bio, cl); } @@ -859,7 +901,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, bio_copy_dev(cache_bio, miss); cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; - cache_bio->bi_end_io = request_endio; + cache_bio->bi_end_io = backing_request_endio; cache_bio->bi_private = &s->cl; bch_bio_map(cache_bio, NULL); @@ -872,14 +914,16 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->cache_miss = miss; s->iop.bio = cache_bio; bio_get(cache_bio); + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, cache_bio, &s->cl); return ret; out_put: bio_put(cache_bio); out_submit: - miss->bi_end_io = request_endio; + miss->bi_end_io = backing_request_endio; miss->bi_private = &s->cl; + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, miss, &s->cl); return ret; } @@ -943,31 +987,46 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) s->iop.bio = s->orig_bio; bio_get(s->iop.bio); - if ((bio_op(bio) != REQ_OP_DISCARD) || - blk_queue_discard(bdev_get_queue(dc->bdev))) - closure_bio_submit(s->iop.c, bio, cl); + if (bio_op(bio) == REQ_OP_DISCARD && + !blk_queue_discard(bdev_get_queue(dc->bdev))) + goto insert_data; + + /* I/O request sent to backing device */ + bio->bi_end_io = backing_request_endio; + closure_bio_submit(s->iop.c, bio, cl); + } else if (s->iop.writeback) { bch_writeback_add(dc); s->iop.bio = bio; if (bio->bi_opf & REQ_PREFLUSH) { - /* Also need to send a flush to the backing device */ - struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, - dc->disk.bio_split); - + /* + * Also need to send a flush to the backing + * device. 
+ */ + struct bio *flush; + + flush = bio_alloc_bioset(GFP_NOIO, 0, + dc->disk.bio_split); + if (!flush) { + s->iop.status = BLK_STS_RESOURCE; + goto insert_data; + } bio_copy_dev(flush, bio); - flush->bi_end_io = request_endio; + flush->bi_end_io = backing_request_endio; flush->bi_private = cl; flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, flush, cl); } } else { s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); - + /* I/O request sent to backing device */ + bio->bi_end_io = backing_request_endio; closure_bio_submit(s->iop.c, bio, cl); } +insert_data: closure_call(&s->iop.cl, bch_data_insert, NULL, cl); continue_at(cl, cached_dev_write_complete, NULL); } @@ -981,6 +1040,7 @@ static void cached_dev_nodata(struct closure *cl) bch_journal_meta(s->iop.c, cl); /* If it's a flush, we send the flush to the backing device too */ + bio->bi_end_io = backing_request_endio; closure_bio_submit(s->iop.c, bio, cl); continue_at(cl, cached_dev_bio_complete, NULL); @@ -1078,6 +1138,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, cached_dev_read(dc, s); } } else + /* I/O request sent to backing device */ detached_dev_do_request(d, bio); return BLK_QC_T_NONE; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f1f64853114b..2f8e70aefc90 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -273,6 +273,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) bio->bi_private = dc; closure_get(cl); + /* I/O request sent to backing device */ __write_super(&dc->sb, bio); closure_return_with_destructor(cl, bch_write_bdev_super_unlock); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 70092ada68e6..4a9547cdcdc5 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -289,6 +289,7 @@ static void write_dirty(struct closure *cl) bio_set_dev(&io->bio, io->dc->bdev); io->bio.bi_end_io = dirty_endio; + /* I/O request sent to backing device */ closure_bio_submit(io->dc->disk.c, &io->bio, cl); } -- cgit v1.2.3 From c7b7bd07404c52d8b9c6fd2fe794052ac367a818 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:25 -0700 Subject: bcache: add io_disable to struct cached_dev If a bcache device is configured in writeback mode, the current code does not handle write I/O errors on the backing device properly. In writeback mode, a write request is written to the cache device first and later flushed to the backing device. If the I/O fails when writing from the cache device to the backing device, the bcache code just ignores the error and the upper layers are NOT notified that the backing device is broken. This patch handles backing device failure the same way cache device failure is handled: - Add an error counter 'io_errors' and an error limit 'error_limit' to struct cached_dev. Also add an io_disable flag to struct cached_dev to disable I/Os on the problematic backing device. - When an I/O error happens on the backing device, increase the io_errors counter. If io_errors reaches error_limit, set cached_dev->io_disable to true and stop the bcache device. The result is that if the backing device is broken or disconnected and I/O errors reach the error limit, the backing device will be disabled and the associated bcache device will be removed from the system. Changelog: v2: remove "bcache: " prefix in pr_error(), and use correct name string to print out bcache device gendisk name. v1: indeed this is new added in v2 patch set.
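The counting scheme is the standard error-limit pattern; a rough sketch with names borrowed from the patch but simplified logic (kernel context assumed, not the literal implementation):

/*
 * Sketch only: count each backing-device error; once the limit
 * is reached, flip io_disable and retire the device.
 */
static void count_backing_error(struct cached_dev *dc)
{
	unsigned int errors = atomic_add_return(1, &dc->io_errors);

	if (errors >= dc->error_limit)
		bch_cached_dev_error(dc); /* sets io_disable, stops device */
}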
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Michael Lyle Cc: Junhui Tang Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 6 ++++++ drivers/md/bcache/io.c | 14 ++++++++++++++ drivers/md/bcache/request.c | 14 ++++++++++++-- drivers/md/bcache/super.c | 21 +++++++++++++++++++++ drivers/md/bcache/sysfs.c | 15 ++++++++++++++- 5 files changed, 67 insertions(+), 3 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 5e9f3610c6fd..d338b7086013 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -367,6 +367,7 @@ struct cached_dev { unsigned sequential_cutoff; unsigned readahead; + unsigned io_disable:1; unsigned verify:1; unsigned bypass_torture_test:1; @@ -388,6 +389,9 @@ struct cached_dev { unsigned writeback_rate_minimum; enum stop_on_failure stop_when_cache_set_failed; +#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64 + atomic_t io_errors; + unsigned error_limit; }; enum alloc_reserve { @@ -911,6 +915,7 @@ static inline void wait_for_kthread_stop(void) /* Forward declarations */ +void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio); void bch_count_io_errors(struct cache *, blk_status_t, int, const char *); void bch_bbio_count_io_errors(struct cache_set *, struct bio *, blk_status_t, const char *); @@ -938,6 +943,7 @@ int bch_bucket_alloc_set(struct cache_set *, unsigned, struct bkey *, int, bool); bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned, unsigned, unsigned, bool); +bool bch_cached_dev_error(struct cached_dev *dc); __printf(2, 3) bool bch_cache_set_error(struct cache_set *, const char *, ...); diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 8013ecbcdbda..7fac97ae036e 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -50,6 +50,20 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, } /* IO errors */ +void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) +{ + char buf[BDEVNAME_SIZE]; + unsigned errors; + + WARN_ONCE(!dc, "NULL pointer of struct cached_dev"); + + errors = atomic_add_return(1, &dc->io_errors); + if (errors < dc->error_limit) + pr_err("%s: IO error on backing device, unrecoverable", + bio_devname(bio, buf)); + else + bch_cached_dev_error(dc); +} void bch_count_io_errors(struct cache *ca, blk_status_t error, diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index b4a5768afbe9..5a82237c7025 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -637,6 +637,8 @@ static void backing_request_endio(struct bio *bio) if (bio->bi_status) { struct search *s = container_of(cl, struct search, cl); + struct cached_dev *dc = container_of(s->d, + struct cached_dev, disk); /* * If a bio has REQ_PREFLUSH for writeback mode, it is * speically assembled in cached_dev_write() for a non-zero @@ -657,6 +659,7 @@ static void backing_request_endio(struct bio *bio) } s->recoverable = false; /* should count I/O error for backing device here */ + bch_count_backing_io_errors(dc, bio); } bio_put(bio); @@ -1065,8 +1068,14 @@ static void detached_dev_end_io(struct bio *bio) bio_data_dir(bio), &ddip->d->disk->part0, ddip->start_time); - kfree(ddip); + if (bio->bi_status) { + struct cached_dev *dc = container_of(ddip->d, + struct cached_dev, disk); + /* should count I/O error for backing device here */ + bch_count_backing_io_errors(dc, bio); + } + kfree(ddip); bio->bi_end_io(bio); } @@ -1105,7 +1114,8 @@ static blk_qc_t cached_dev_make_request(struct request_queue 
*q, struct cached_dev *dc = container_of(d, struct cached_dev, disk); int rw = bio_data_dir(bio); - if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { + if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) || + dc->io_disable)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); return BLK_QC_T_NONE; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2f8e70aefc90..06f4b4833755 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1197,6 +1197,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) max(dc->disk.disk->queue->backing_dev_info->ra_pages, q->backing_dev_info->ra_pages); + atomic_set(&dc->io_errors, 0); + dc->io_disable = false; + dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; /* default to auto */ dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; @@ -1351,6 +1354,24 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size) return flash_dev_run(c, u); } +bool bch_cached_dev_error(struct cached_dev *dc) +{ + char name[BDEVNAME_SIZE]; + + if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) + return false; + + dc->io_disable = true; + /* make others know io_disable is true earlier */ + smp_mb(); + + pr_err("stop %s: too many IO errors on backing device %s\n", + dc->disk.disk->disk_name, bdevname(dc->bdev, name)); + + bcache_device_stop(&dc->disk); + return true; +} + /* Cache set */ __printf(2, 3) diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 8c3fd05db87a..dfeef583ee50 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -141,7 +141,9 @@ SHOW(__bch_cached_dev) var_print(writeback_delay); var_print(writeback_percent); sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); - + sysfs_hprint(io_errors, atomic_read(&dc->io_errors)); + sysfs_printf(io_error_limit, "%i", dc->error_limit); + sysfs_printf(io_disable, "%i", dc->io_disable); var_print(writeback_rate_update_seconds); var_print(writeback_rate_i_term_inverse); var_print(writeback_rate_p_term_inverse); @@ -232,6 +234,14 @@ STORE(__cached_dev) d_strtoul(writeback_rate_i_term_inverse); d_strtoul_nonzero(writeback_rate_p_term_inverse); + sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); + + if (attr == &sysfs_io_disable) { + int v = strtoul_or_return(buf); + + dc->io_disable = v ? 1 : 0; + } + d_strtoi_h(sequential_cutoff); d_strtoi_h(readahead); @@ -352,6 +362,9 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_writeback_rate_i_term_inverse, &sysfs_writeback_rate_p_term_inverse, &sysfs_writeback_rate_debug, + &sysfs_errors, + &sysfs_io_error_limit, + &sysfs_io_disable, &sysfs_dirty_data, &sysfs_stripe_size, &sysfs_partial_stripes_expensive, -- cgit v1.2.3 From fd01991d5c20098c5c1ffc4dca6c821cc60a2f74 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:26 -0700 Subject: bcache: Fix indentation This patch avoids that smatch complains about inconsistent indentation. 
Signed-off-by: Bart Van Assche Reviewed-by: Michael Lyle Reviewed-by: Coly Li Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 2 +- drivers/md/bcache/writeback.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index d64aff0b8abc..143ed5a758e7 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -2178,7 +2178,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op, if (b->key.ptr[0] != btree_ptr || b->seq != seq + 1) { - op->lock = b->level; + op->lock = b->level; goto out; } } diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 0bba8f1c6cdf..610fb01de629 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -39,7 +39,7 @@ static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c) if (!d || !UUID_FLASH_ONLY(&c->uuids[i])) continue; - ret += bcache_dev_sectors_dirty(d); + ret += bcache_dev_sectors_dirty(d); } mutex_unlock(&bch_register_lock); -- cgit v1.2.3 From 4a4e443835a43a79113cc237c472c0d268eb1e1c Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:27 -0700 Subject: bcache: Add __printf annotation to __bch_check_keys() Make it possible for the compiler to verify the consistency of the format string passed to __bch_check_keys() and the arguments that should be formatted according to that format string. Signed-off-by: Bart Van Assche Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/bset.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index fa506c1aa524..0c24280f3b98 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -531,14 +531,15 @@ int __bch_keylist_realloc(struct keylist *, unsigned); #ifdef CONFIG_BCACHE_DEBUG int __bch_count_data(struct btree_keys *); -void __bch_check_keys(struct btree_keys *, const char *, ...); +void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...); void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); void bch_dump_bucket(struct btree_keys *); #else static inline int __bch_count_data(struct btree_keys *b) { return -1; } -static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {} +static inline void __printf(2, 3) + __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {} static inline void bch_dump_bucket(struct btree_keys *b) {} void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); -- cgit v1.2.3 From 9dfbdec7b7fea1ff1b7b5d5d12980dbc7dca46c7 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:28 -0700 Subject: bcache: Annotate switch fall-through This patch avoids that building with W=1 triggers complaints about switch fall-throughs. 
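For context, gcc's -Wimplicit-fallthrough is silenced by a comment matching its fall-through regex; a minimal illustration (hypothetical function, not from the patch):

static int suffix_shift(char c)
{
	int shift = 0;

	switch (c) {
	case 'm':
		shift += 10;
		/* fall through */
	case 'k':
		shift += 10;
		/* fall through */
	default:
		break;
	}
	return shift;
}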
Signed-off-by: Bart Van Assche Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/util.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index a23cd6a14b74..6198041f0ee2 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -32,20 +32,27 @@ int bch_ ## name ## _h(const char *cp, type *res) \ case 'y': \ case 'z': \ u++; \ + /* fall through */ \ case 'e': \ u++; \ + /* fall through */ \ case 'p': \ u++; \ + /* fall through */ \ case 't': \ u++; \ + /* fall through */ \ case 'g': \ u++; \ + /* fall through */ \ case 'm': \ u++; \ + /* fall through */ \ case 'k': \ u++; \ if (e++ == cp) \ return -EINVAL; \ + /* fall through */ \ case '\n': \ case '\0': \ if (*e == '\n') \ -- cgit v1.2.3 From 47344e330eabc1515cbe6061eb337100a3ab6d37 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:29 -0700 Subject: bcache: Fix kernel-doc warnings Avoid that building with W=1 triggers warnings about the kernel-doc headers. Signed-off-by: Bart Van Assche Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 2 +- drivers/md/bcache/closure.c | 8 ++++---- drivers/md/bcache/request.c | 1 + drivers/md/bcache/util.c | 18 ++++++++---------- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 143ed5a758e7..17936b2dc7d6 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -962,7 +962,7 @@ err: return b; } -/** +/* * bch_btree_node_get - find a btree node in the cache and lock it, reading it * in from disk if necessary. * diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index c0949c9f843b..0e14969182c6 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -46,7 +46,7 @@ void closure_sub(struct closure *cl, int v) } EXPORT_SYMBOL(closure_sub); -/** +/* * closure_put - decrement a closure's refcount */ void closure_put(struct closure *cl) @@ -55,7 +55,7 @@ void closure_put(struct closure *cl) } EXPORT_SYMBOL(closure_put); -/** +/* * closure_wake_up - wake up all closures on a wait list, without memory barrier */ void __closure_wake_up(struct closure_waitlist *wait_list) @@ -79,9 +79,9 @@ EXPORT_SYMBOL(__closure_wake_up); /** * closure_wait - add a closure to a waitlist - * - * @waitlist will own a ref on @cl, which will be released when + * @waitlist: will own a ref on @cl, which will be released when * closure_wake_up() is called on @waitlist. + * @cl: closure pointer. * */ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 5a82237c7025..a65e3365eeb9 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -296,6 +296,7 @@ err: /** * bch_data_insert - stick some data in the cache + * @cl: closure pointer. * * This is the starting point for any data to end up in a cache device; it could * be from a normal write, or a writeback write, or a write to a flash only diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index 6198041f0ee2..74febd5230df 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -82,10 +82,9 @@ STRTO_H(strtoll, long long) STRTO_H(strtoull, unsigned long long) /** - * bch_hprint() - formats @v to human readable string for sysfs. - * - * @v - signed 64 bit integer - * @buf - the (at least 8 byte) buffer to format the result into. 
+ * bch_hprint - formats @v to human readable string for sysfs. + * @buf: the (at least 8 byte) buffer to format the result into. + * @v: signed 64 bit integer * * Returns the number of bytes used by format. */ @@ -225,13 +224,12 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time) } /** - * bch_next_delay() - increment @d by the amount of work done, and return how - * long to delay until the next time to do some work. - * - * @d - the struct bch_ratelimit to update - * @done - the amount of work done, in arbitrary units + * bch_next_delay() - update ratelimiting statistics and calculate next delay + * @d: the struct bch_ratelimit to update + * @done: the amount of work done, in arbitrary units * - * Returns the amount of time to delay by, in jiffies + * Increment @d by the amount of work done, and return how long to delay in + * jiffies until the next time to do some work. */ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) { -- cgit v1.2.3 From f0d3814090ac77de94c42b7124c37ece23629197 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:30 -0700 Subject: bcache: Remove an unused variable Signed-off-by: Bart Van Assche Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/extents.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index f9d391711595..c334e6666461 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -534,7 +534,6 @@ err: static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) { struct btree *b = container_of(bk, struct btree, keys); - struct bucket *g; unsigned i, stale; if (!KEY_PTRS(k) || @@ -549,7 +548,6 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) return false; for (i = 0; i < KEY_PTRS(k); i++) { - g = PTR_BUCKET(b->c, k, i); stale = ptr_stale(b->c, k, i); btree_bug_on(stale > 96, b, -- cgit v1.2.3 From 42361469ae84c851e40cb1f94c8c9a14cdd94039 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:31 -0700 Subject: bcache: Suppress more warnings about set-but-not-used variables This patch does not change any functionality. 
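For reference, __maybe_unused expands to __attribute__((unused)) in the kernel; a hypothetical minimal reproduction of the warning class being silenced:

/*
 * With -Wunused-but-set-variable, gcc flags 'v' because it is
 * assigned but never read; __maybe_unused suppresses this when
 * the assignment itself is the point (e.g. a macro's pop slot).
 */
static void pop_and_discard(int *heap, int n)
{
	int v __maybe_unused;

	v = heap[n - 1];	/* popped value is intentionally dropped */
}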
Reviewed-by: Michael Lyle Reviewed-by: Coly Li Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- drivers/md/bcache/bset.c | 4 ++-- drivers/md/bcache/journal.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index e56d3ecdbfcb..579c696a5fe0 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -1072,7 +1072,7 @@ EXPORT_SYMBOL(bch_btree_iter_init); static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, btree_iter_cmp_fn *cmp) { - struct btree_iter_set unused; + struct btree_iter_set b __maybe_unused; struct bkey *ret = NULL; if (!btree_iter_end(iter)) { @@ -1087,7 +1087,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, } if (iter->data->k == iter->data->end) - heap_pop(iter, unused, cmp); + heap_pop(iter, b, cmp); else heap_sift(iter, 0, cmp); } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index c94085f400a4..acd0e5c074dd 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -493,7 +493,7 @@ static void journal_reclaim(struct cache_set *c) struct cache *ca; uint64_t last_seq; unsigned iter, n = 0; - atomic_t p; + atomic_t p __maybe_unused; atomic_long_inc(&c->reclaim); -- cgit v1.2.3 From 20d3a518713e394efa5a899c84574b4b79ec5098 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:32 -0700 Subject: bcache: Reduce the number of sparse complaints about lock imbalances Add more annotations for sparse to inform it about which functions do not have the same number of spin_lock() and spin_unlock() calls. Signed-off-by: Bart Van Assche Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/journal.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index acd0e5c074dd..18f1b5239620 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -594,6 +594,7 @@ static void journal_write_done(struct closure *cl) } static void journal_write_unlock(struct closure *cl) + __releases(&c->journal.lock) { struct cache_set *c = container_of(cl, struct cache_set, journal.io); @@ -705,6 +706,7 @@ static void journal_try_write(struct cache_set *c) static struct journal_write *journal_wait_for_write(struct cache_set *c, unsigned nkeys) + __acquires(&c->journal.lock) { size_t sectors; struct closure cl; -- cgit v1.2.3 From 5f2b18ec8e1643410a2369f06888951cdedea0bf Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:33 -0700 Subject: bcache: Fix a compiler warning in bcache_device_init() Avoid that building with W=1 triggers the following compiler warning: drivers/md/bcache/super.c:776:20: warning: comparison is always false due to limited range of data type [-Wtype-limits] d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { ^ Reviewed-by: Coly Li Reviewed-by: Michael Lyle Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 06f4b4833755..a21694788619 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -778,6 +778,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, sector_t sectors) { struct request_queue *q; + const size_t max_stripes = min_t(size_t, INT_MAX, + SIZE_MAX / sizeof(atomic_t)); size_t n; int idx; @@ -786,9 +788,7 @@ static int bcache_device_init(struct 
bcache_device *d, unsigned block_size, d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); - if (!d->nr_stripes || - d->nr_stripes > INT_MAX || - d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { + if (!d->nr_stripes || d->nr_stripes > max_stripes) { pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", (unsigned)d->nr_stripes); return -ENOMEM; -- cgit v1.2.3 From 818e0fa293ca836eba515615c64680ea916fd7cd Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 19 Mar 2018 11:46:13 -0700 Subject: block: Change a rcu_read_{lock,unlock}_sched() pair into rcu_read_{lock,unlock}() scsi_device_quiesce() uses synchronize_rcu() to guarantee that the effect of blk_set_preempt_only() will be visible for percpu_ref_tryget() calls that occur after the queue unfreeze by using the approach explained in https://lwn.net/Articles/573497/. The rcu read lock and unlock calls in blk_queue_enter() form a pair with the synchronize_rcu() call in scsi_device_quiesce(). Both scsi_device_quiesce() and blk_queue_enter() must either use regular RCU or RCU-sched. Since neither the RCU-protected code in blk_queue_enter() nor blk_queue_usage_counter_release() sleeps, regular RCU protection is sufficient. Note: scsi_device_quiesce() does not have to be modified since it already uses synchronize_rcu(). Reported-by: Tejun Heo Fixes: 3a0a529971ec ("block, scsi: Make SCSI quiesce and resume work reliably") Signed-off-by: Bart Van Assche Acked-by: Tejun Heo Cc: Tejun Heo Cc: Hannes Reinecke Cc: Ming Lei Cc: Christoph Hellwig Cc: Johannes Thumshirn Cc: Oleksandr Natalenko Cc: Martin Steigerwald Cc: stable@vger.kernel.org # v4.15 Signed-off-by: Jens Axboe --- block/blk-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 5e88c579e896..a0f675f84f86 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -917,7 +917,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) bool success = false; int ret; - rcu_read_lock_sched(); + rcu_read_lock(); if (percpu_ref_tryget_live(&q->q_usage_counter)) { /* * The code that sets the PREEMPT_ONLY flag is @@ -930,7 +930,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) percpu_ref_put(&q->q_usage_counter); } } - rcu_read_unlock_sched(); + rcu_read_unlock(); if (success) return 0; -- cgit v1.2.3 From 6e2fb22103b99c26ae30a46512abe75526d8e4c9 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 21 Mar 2018 12:42:25 -0400 Subject: block: use 32-bit blk_status_t on Alpha Early alpha processors cannot write a single byte or word; they read 8 bytes, modify the value in registers and write back 8 bytes. The type blk_status_t is defined as one byte, it is often written asynchronously by I/O completion routines, this asynchronous modification can corrupt content of nearby bytes if these nearby bytes can be written simultaneously by another CPU. - one example of such corruption is the structure dm_io where "blk_status_t status" is written by an asynchronous completion routine and "atomic_t io_count" is modified synchronously - another example is the structure dm_buffer where "unsigned hold_count" is modified synchronously from process context and "blk_status_t write_error" is modified asynchronously from bio completion routine This patch fixes the bug by changing the type blk_status_t to 32 bits if we are on Alpha and if we are compiling for a processor that doesn't have the byte-word-extension. 
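The corruption mechanism is easiest to see on a struct layout like the ones cited; a schematic example (field names echo the commit message, not the actual dm headers):

/*
 * On pre-BWX Alpha there is no byte store: writing 'status' compiles
 * to load 8 bytes / modify / store 8 bytes, which rewrites the bytes
 * of 'io_count' as well and can lose a concurrent update to it.
 * 32-bit stores (stl) exist on every Alpha, so widening blk_status_t
 * to 32 bits turns the write into a plain store of its own word.
 */
struct dm_io_like {
	u8       status;	/* written asynchronously from bio completion */
	atomic_t io_count;	/* modified synchronously from process context */
};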
Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # 4.13+ Signed-off-by: Jens Axboe --- include/linux/blk_types.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index bf18b95ed92d..17b18b91ebac 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -20,8 +20,13 @@ typedef void (bio_end_io_t) (struct bio *); /* * Block error status values. See block/blk-core:blk_errors for the details. + * Alpha cannot write a byte atomically, so we need to use 32-bit value. */ +#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__) +typedef u32 __bitwise blk_status_t; +#else typedef u8 __bitwise blk_status_t; +#endif #define BLK_STS_OK 0 #define BLK_STS_NOTSUPP ((__force blk_status_t)1) #define BLK_STS_TIMEOUT ((__force blk_status_t)2) -- cgit v1.2.3 From bd5c4facf59648581d2f1692dad7b107bf429954 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 21 Mar 2018 12:49:29 -0400 Subject: Fix slab name "biovec-(1<<(21-12))" I'm getting a slab named "biovec-(1<<(21-12))". It is caused by unintended expansion of the macro BIO_MAX_PAGES. This patch renames it to biovec-max. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v4.14+ Signed-off-by: Jens Axboe --- block/bio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/block/bio.c b/block/bio.c index e1708db48258..53e0f0a1ed94 100644 --- a/block/bio.c +++ b/block/bio.c @@ -43,9 +43,9 @@ * break badly! cannot be bigger than what you can fit into an * unsigned short */ -#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } +#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n } static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = { - BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), + BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max), }; #undef BV -- cgit v1.2.3 From 57678e5a3d5145e5f08aa1d307ba219b27b1765a Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Thu, 22 Mar 2018 18:56:16 +0800 Subject: mmc: block: Delete gendisk before cleaning up the request queue dd if=/dev/urandom of=/dev/mmcblk1 bs=4k count=10000 with a SD card hotplug during transfer reports a warning below introduced by commit a063057d7c73 ("block: Fix a race between request queue removal and the block cgroup controller"). So we should now remove the disk, partition and bdi sysfs attributes before cleaning up the request queue associated with the disk. 
[ 410.331226] mmc1: card 59b4 removed [ 410.348583] WARNING: CPU: 0 PID: 5 at block/blk-core.c:785 blk_cleanup_queue+0x138/0x140 [ 410.349294] Modules linked in: [ 410.349570] CPU: 0 PID: 5 Comm: kworker/0:0 Not tainted 4.16.0-rc6-next-20180321-00004-gc2ad6a7 #263 [ 410.350363] Hardware name: Excavator-RK3399 Board (DT) [ 410.350819] Workqueue: events_freezable mmc_rescan [ 410.351242] pstate: 60000005 (nZCv daif -PAN -UAO) [ 410.351663] pc : blk_cleanup_queue+0x138/0x140 [ 410.352054] lr : blk_cleanup_queue+0xac/0x140 [ 410.352436] sp : ffff0000092cbb90 [ 410.352727] x29: ffff0000092cbb90 x28: 0000000000000000 [ 410.353195] x27: ffff8000f6f23030 x26: ffff00000904e610 [ 410.353662] x25: ffff8000f17cc808 x24: ffff8000f1038200 [ 410.354128] x23: 0000000000000060 x22: 0000000000000000 [ 410.354595] x21: ffff8000f11748d8 x20: ffff8000f1038200 [ 410.355061] x19: ffff8000f1174200 x18: 0000ffff936347d8 [ 410.355528] x17: 0000ffff935b93c0 x16: ffff0000081263f8 [ 410.355994] x15: 0000000000000000 x14: 0000000000000400 [ 410.356461] x13: 0000000000000001 x12: 0000000000000001 [ 410.356927] x11: 0000000000000040 x10: ffff8000f2400028 [ 410.357393] x9 : ffff8000f2400040 x8 : 0000000000000000 [ 410.357860] x7 : ffff8000f6f3a340 x6 : ffff8000f6f3a340 [ 410.358326] x5 : ffff8000f2400000 x4 : ffff8000f6f3a340 [ 410.358792] x3 : 0000000000000000 x2 : 39c1333e45670800 [ 410.359259] x1 : 0000000000000000 x0 : 0000000000000003 [ 410.359726] Call trace: [ 410.359943] blk_cleanup_queue+0x138/0x140 [ 410.360305] mmc_cleanup_queue+0x2c/0x48 [ 410.360652] mmc_blk_remove_req+0x1c/0x98 [ 410.361005] mmc_blk_remove+0x180/0x1c0 [ 410.361343] mmc_bus_remove+0x1c/0x28 [ 410.361670] device_release_driver_internal+0x154/0x1f0 [ 410.362128] device_release_driver+0x14/0x20 [ 410.362504] bus_remove_device+0xc8/0x108 [ 410.362858] device_del+0x120/0x350 [ 410.363167] mmc_remove_card+0x5c/0xb8 [ 410.363498] mmc_sd_detect+0x40/0x78 [ 410.363813] mmc_rescan+0x19c/0x368 [ 410.364123] process_one_work+0x1ac/0x318 [ 410.364477] worker_thread+0x50/0x450 [ 410.364801] kthread+0xf8/0x128 [ 410.365081] ret_from_fork+0x10/0x18 [ 410.365395] ---[ end trace 268e87a46c28968c ]--- Reviewed-by: Bart Van Assche Signed-off-by: Shawn Lin Signed-off-by: Jens Axboe --- drivers/mmc/core/block.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 20135a5de748..c895bb0d5569 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2647,7 +2647,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) * from being accepted. */ card = md->queue.card; - mmc_cleanup_queue(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && @@ -2657,6 +2656,7 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) del_gendisk(md->disk); } + mmc_cleanup_queue(&md->queue); mmc_blk_put(md); } } -- cgit v1.2.3 From a470143fc83924251647143ff042bd2843e296cf Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 24 Jan 2018 20:24:24 +0200 Subject: net/utils: Introduce inet_addr_is_any Can be useful to check INET_ANY address for both ipv4/ipv6 addresses. Reviewed-by: Bart Van Assche Signed-off-by: Sagi Grimberg Cc: "David S. 
Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jens Axboe --- include/linux/inet.h | 1 + net/core/utils.c | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/include/linux/inet.h b/include/linux/inet.h index 636ebe87e6f8..97defc1139e9 100644 --- a/include/linux/inet.h +++ b/include/linux/inet.h @@ -59,5 +59,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char extern int inet_pton_with_scope(struct net *net, unsigned short af, const char *src, const char *port, struct sockaddr_storage *addr); +extern bool inet_addr_is_any(struct sockaddr *addr); #endif /* _LINUX_INET_H */ diff --git a/net/core/utils.c b/net/core/utils.c index 93066bd0305a..d47863b07a60 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -403,6 +403,29 @@ int inet_pton_with_scope(struct net *net, __kernel_sa_family_t af, } EXPORT_SYMBOL(inet_pton_with_scope); +bool inet_addr_is_any(struct sockaddr *addr) +{ + if (addr->sa_family == AF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr; + const struct sockaddr_in6 in6_any = + { .sin6_addr = IN6ADDR_ANY_INIT }; + + if (!memcmp(in6->sin6_addr.s6_addr, + in6_any.sin6_addr.s6_addr, 16)) + return true; + } else if (addr->sa_family == AF_INET) { + struct sockaddr_in *in = (struct sockaddr_in *)addr; + + if (in->sin_addr.s_addr == htonl(INADDR_ANY)) + return true; + } else { + pr_warn("unexpected address family %u\n", addr->sa_family); + } + + return false; +} +EXPORT_SYMBOL(inet_addr_is_any); + void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr) { -- cgit v1.2.3 From 4c65268588102014837b2d09e5a02557e5d3fc9e Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 24 Jan 2018 20:27:10 +0200 Subject: nvmet: don't return "any" ip address in discovery log page Its perfectly valid to assign a nvmet port to listen on "any" IP address (traddr 0.0.0.0 for ipv4 address family) for IP based transport ports. However, we must not return this address in discovery log entries. Instead we need to return the address where the request was accepted on (req->port address). Since this is nvme transport specific, introduce an optional .disc_traddr interface that is designed to check that a port in question is bound to "any" IP address and if so, set the traddr from the port where the request came from. 
Reviewed-by: Johannes Thumshirn Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe --- drivers/nvme/target/discovery.c | 30 +++++++++++++++++++++++++++--- drivers/nvme/target/nvmet.h | 2 ++ drivers/nvme/target/rdma.c | 18 ++++++++++++++++++ 3 files changed, 47 insertions(+), 3 deletions(-) diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 8f3b57b4c97b..a72425d8bce0 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -43,7 +43,8 @@ void nvmet_referral_disable(struct nvmet_port *port) } static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, - struct nvmet_port *port, char *subsys_nqn, u8 type, u32 numrec) + struct nvmet_port *port, char *subsys_nqn, char *traddr, + u8 type, u32 numrec) { struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec]; @@ -56,11 +57,30 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, e->asqsz = cpu_to_le16(NVME_AQ_DEPTH); e->subtype = type; memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); - memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); + memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE); memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); } +/* + * nvmet_set_disc_traddr - set a correct discovery log entry traddr + * + * IP based transports (e.g RDMA) can listen on "any" ipv4/ipv6 addresses + * (INADDR_ANY or IN6ADDR_ANY_INIT). The discovery log page traddr reply + * must not contain that "any" IP address. If the transport implements + * .disc_traddr, use it. this callback will set the discovery traddr + * from the req->port address in case the port in question listens + * "any" IP address. + */ +static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port, + char *traddr) +{ + if (req->ops->disc_traddr) + req->ops->disc_traddr(req, port, traddr); + else + memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); +} + static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) { const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry); @@ -90,8 +110,11 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn)) continue; if (residual_len >= entry_size) { + char traddr[NVMF_TRADDR_SIZE]; + + nvmet_set_disc_traddr(req, req->port, traddr); nvmet_format_discovery_entry(hdr, req->port, - p->subsys->subsysnqn, + p->subsys->subsysnqn, traddr, NVME_NQN_NVME, numrec); residual_len -= entry_size; } @@ -102,6 +125,7 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req) if (residual_len >= entry_size) { nvmet_format_discovery_entry(hdr, r, NVME_DISC_SUBSYS_NAME, + r->disc_addr.traddr, NVME_NQN_DISC, numrec); residual_len -= entry_size; } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 417f6c0331cc..40afb5d6ed91 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -209,6 +209,8 @@ struct nvmet_fabrics_ops { int (*add_port)(struct nvmet_port *port); void (*remove_port)(struct nvmet_port *port); void (*delete_ctrl)(struct nvmet_ctrl *ctrl); + void (*disc_traddr)(struct nvmet_req *req, + struct nvmet_port *port, char *traddr); }; #define NVMET_MAX_INLINE_BIOVEC 8 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 978e169c11bf..d7831372e1f9 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1445,6 +1445,23 @@ static void nvmet_rdma_remove_port(struct 
nvmet_port *port) rdma_destroy_id(cm_id); } +static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, + struct nvmet_port *port, char *traddr) +{ + struct rdma_cm_id *cm_id = port->priv; + + if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { + struct nvmet_rdma_rsp *rsp = + container_of(req, struct nvmet_rdma_rsp, req); + struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; + struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; + + sprintf(traddr, "%pISc", addr); + } else { + memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE); + } +} + static struct nvmet_fabrics_ops nvmet_rdma_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_RDMA, @@ -1455,6 +1472,7 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = { .remove_port = nvmet_rdma_remove_port, .queue_response = nvmet_rdma_queue_response, .delete_ctrl = nvmet_rdma_delete_ctrl, + .disc_traddr = nvmet_rdma_disc_port_addr, }; static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) -- cgit v1.2.3 From 7bfca0cfe60554faa6e9dafa97a4282fb035576b Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 25 Jan 2018 13:56:46 +0200 Subject: iscsi-target: use common inet_addr_is_any Instead of open-coding it. Reviewed-by: Bart Van Assche Reviewed-by: Johannes Thumshirn Signed-off-by: Sagi Grimberg Cc: "Nicholas A. Bellinger" Cc: target-devel@vger.kernel.org Signed-off-by: Jens Axboe --- drivers/target/iscsi/iscsi_target.c | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 9eb10d34682c..8e223799347a 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -3291,30 +3292,6 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0); } -static bool iscsit_check_inaddr_any(struct iscsi_np *np) -{ - bool ret = false; - - if (np->np_sockaddr.ss_family == AF_INET6) { - const struct sockaddr_in6 sin6 = { - .sin6_addr = IN6ADDR_ANY_INIT }; - struct sockaddr_in6 *sock_in6 = - (struct sockaddr_in6 *)&np->np_sockaddr; - - if (!memcmp(sock_in6->sin6_addr.s6_addr, - sin6.sin6_addr.s6_addr, 16)) - ret = true; - } else { - struct sockaddr_in * sock_in = - (struct sockaddr_in *)&np->np_sockaddr; - - if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY)) - ret = true; - } - - return ret; -} - #define SENDTARGETS_BUF_LIMIT 32768U static int @@ -3393,7 +3370,6 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) { struct iscsi_np *np = tpg_np->tpg_np; - bool inaddr_any = iscsit_check_inaddr_any(np); struct sockaddr_storage *sockaddr; if (np->np_network_transport != network_transport) @@ -3422,7 +3398,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, } } - if (inaddr_any) + if (inet_addr_is_any((struct sockaddr *)&np->np_sockaddr)) sockaddr = &conn->local_sockaddr; else sockaddr = &np->np_sockaddr; -- cgit v1.2.3 From 42595eb7d0095897823b89344e497b69d08d91c1 Mon Sep 17 00:00:00 2001 From: Minwoo Im Date: Thu, 8 Feb 2018 22:56:31 +0900 Subject: nvme: use define instead of magic value for identify size NVME_IDENTIFY_DATA_SIZE was added to linux/nvme.h by following commit. 
commit 0add5e8e588c ("nvmet: use NVME_IDENTIFY_DATA_SIZE") Use the NVME_IDENTIFY_DATA_SIZE define instead of the magic value 0x1000 for the identify data size. Reviewed-by: Johannes Thumshirn Signed-off-by: Minwoo Im Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 72e241923e7d..24fd90f5fbfb 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -948,7 +948,8 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n c.identify.opcode = nvme_admin_identify; c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST; c.identify.nsid = cpu_to_le32(nsid); - return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000); + return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, + NVME_IDENTIFY_DATA_SIZE); } static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl, @@ -3109,7 +3110,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024); int ret = 0; - ns_list = kzalloc(0x1000, GFP_KERNEL); + ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); if (!ns_list) return -ENOMEM; -- cgit v1.2.3 From b9e03857f2e22788db6ccb67512a6604a6b4f6db Mon Sep 17 00:00:00 2001 From: Thomas Tai Date: Thu, 8 Feb 2018 13:38:29 -0500 Subject: nvme: Add fault injection feature MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Linux's fault injection framework provides a systematic way to support error injection via debugfs in the /sys/kernel/debug directory. This patch uses the framework to add error injection to the NVMe driver. The fault injection source code is stored in a separate file and only linked if the CONFIG_FAULT_INJECTION_DEBUG_FS kernel config is selected. Once error injection is enabled, NVME_SC_INVALID_OPCODE with no retry will be injected into nvme_end_request. Users can change the default status code and no-retry flag via debugfs. The following example shows how to enable and inject an error. For more examples, refer to Documentation/fault-injection/nvme-fault-injection.txt How to enable nvme fault injection: First, enable the CONFIG_FAULT_INJECTION_DEBUG_FS kernel config and recompile the kernel. After booting up the kernel, do the following. How to inject an error: mount /dev/nvme0n1 /mnt echo 1 > /sys/kernel/debug/nvme0n1/fault_inject/times echo 100 > /sys/kernel/debug/nvme0n1/fault_inject/probability cp a.file /mnt Expected Result: cp: cannot stat ‘/mnt/a.file’: Input/output error Message from dmesg: FAULT_INJECTION: forcing a failure. name fault_inject, interval 1, probability 100, space 0, times 1 CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.15.0-rc8+ #2 Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 Call Trace: dump_stack+0x5c/0x7d should_fail+0x148/0x170 nvme_should_fail+0x2f/0x50 [nvme_core] nvme_process_cq+0xe7/0x1d0 [nvme] nvme_irq+0x1e/0x40 [nvme] __handle_irq_event_percpu+0x3a/0x190 handle_irq_event_percpu+0x30/0x70 handle_irq_event+0x36/0x60 handle_fasteoi_irq+0x78/0x120 handle_irq+0xa7/0x130 ?
tick_irq_enter+0xa8/0xc0 do_IRQ+0x43/0xc0 common_interrupt+0xa2/0xa2 RIP: 0010:native_safe_halt+0x2/0x10 RSP: 0018:ffffffff82003e90 EFLAGS: 00000246 ORIG_RAX: ffffffffffffffdd RAX: ffffffff817a10c0 RBX: ffffffff82012480 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: 0000000000000000 R08: 000000008e38ce64 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffffffff82012480 R13: ffffffff82012480 R14: 0000000000000000 R15: 0000000000000000 ? __sched_text_end+0x4/0x4 default_idle+0x18/0xf0 do_idle+0x150/0x1d0 cpu_startup_entry+0x6f/0x80 start_kernel+0x4c4/0x4e4 ? set_init_arg+0x55/0x55 secondary_startup_64+0xa5/0xb0 print_req_error: I/O error, dev nvme0n1, sector 9240 EXT4-fs error (device nvme0n1): ext4_find_entry:1436: inode #2: comm cp: reading directory lblock 0 Signed-off-by: Thomas Tai Reviewed-by: Eric Saint-Etienne Signed-off-by: Karl Volz Reviewed-by: Keith Busch Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe --- drivers/nvme/host/Makefile | 1 + drivers/nvme/host/core.c | 2 + drivers/nvme/host/fault_inject.c | 79 ++++++++++++++++++++++++++++++++++++++++ drivers/nvme/host/nvme.h | 27 ++++++++++++++ 4 files changed, 109 insertions(+) create mode 100644 drivers/nvme/host/fault_inject.c diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index 441e67e3a9d7..aea459c65ae1 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -12,6 +12,7 @@ nvme-core-y := core.o nvme-core-$(CONFIG_TRACING) += trace.o nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o nvme-core-$(CONFIG_NVM) += lightnvm.o +nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o nvme-y += pci.o diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 24fd90f5fbfb..f96b99356917 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3035,6 +3035,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nvme_mpath_add_disk(ns->head); nvme_mpath_add_disk_links(ns); + nvme_fault_inject_init(ns); return; out_unlink_ns: mutex_lock(&ctrl->subsys->lock); @@ -3053,6 +3054,7 @@ static void nvme_ns_remove(struct nvme_ns *ns) if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) return; + nvme_fault_inject_fini(ns); if (ns->disk && ns->disk->flags & GENHD_FL_UP) { nvme_mpath_remove_disk_links(ns); sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c new file mode 100644 index 000000000000..02632266ac06 --- /dev/null +++ b/drivers/nvme/host/fault_inject.c @@ -0,0 +1,79 @@ +/* + * fault injection support for nvme. 
+ * + * Copyright (c) 2018, Oracle and/or its affiliates + * + */ + +#include +#include "nvme.h" + +static DECLARE_FAULT_ATTR(fail_default_attr); +/* optional fault injection attributes boot time option: + * nvme_core.fail_request=,,, + */ +static char *fail_request; +module_param(fail_request, charp, 0000); + +void nvme_fault_inject_init(struct nvme_ns *ns) +{ + struct dentry *dir, *parent; + char *name = ns->disk->disk_name; + struct nvme_fault_inject *fault_inj = &ns->fault_inject; + struct fault_attr *attr = &fault_inj->attr; + + /* set default fault injection attribute */ + if (fail_request) + setup_fault_attr(&fail_default_attr, fail_request); + + /* create debugfs directory and attribute */ + parent = debugfs_create_dir(name, NULL); + if (!parent) { + pr_warn("%s: failed to create debugfs directory\n", name); + return; + } + + *attr = fail_default_attr; + dir = fault_create_debugfs_attr("fault_inject", parent, attr); + if (IS_ERR(dir)) { + pr_warn("%s: failed to create debugfs attr\n", name); + debugfs_remove_recursive(parent); + return; + } + ns->fault_inject.parent = parent; + + /* create debugfs for status code and dont_retry */ + fault_inj->status = NVME_SC_INVALID_OPCODE; + fault_inj->dont_retry = true; + debugfs_create_x16("status", 0600, dir, &fault_inj->status); + debugfs_create_bool("dont_retry", 0600, dir, &fault_inj->dont_retry); +} + +void nvme_fault_inject_fini(struct nvme_ns *ns) +{ + /* remove debugfs directories */ + debugfs_remove_recursive(ns->fault_inject.parent); +} + +void nvme_should_fail(struct request *req) +{ + struct gendisk *disk = req->rq_disk; + struct nvme_ns *ns = NULL; + u16 status; + + /* + * make sure this request is coming from a valid namespace + */ + if (!disk) + return; + + ns = disk->private_data; + if (ns && should_fail(&ns->fault_inject.attr, 1)) { + /* inject status code and DNR bit */ + status = ns->fault_inject.status; + if (ns->fault_inject.dont_retry) + status |= NVME_SC_DNR; + nvme_req(req)->status = status; + } +} +EXPORT_SYMBOL_GPL(nvme_should_fail); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 0521e4707d1c..9a3d3540aa6a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -21,6 +21,7 @@ #include #include #include +#include extern unsigned int nvme_io_timeout; #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) @@ -261,6 +262,15 @@ struct nvme_ns_head { int instance; }; +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS +struct nvme_fault_inject { + struct fault_attr attr; + struct dentry *parent; + bool dont_retry; /* DNR, do not retry */ + u16 status; /* status code */ +}; +#endif + struct nvme_ns { struct list_head list; @@ -282,6 +292,11 @@ struct nvme_ns { #define NVME_NS_REMOVING 0 #define NVME_NS_DEAD 1 u16 noiob; + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + struct nvme_fault_inject fault_inject; +#endif + }; struct nvme_ctrl_ops { @@ -300,6 +315,16 @@ struct nvme_ctrl_ops { int (*reinit_request)(void *data, struct request *rq); }; +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS +void nvme_fault_inject_init(struct nvme_ns *ns); +void nvme_fault_inject_fini(struct nvme_ns *ns); +void nvme_should_fail(struct request *req); +#else +static inline void nvme_fault_inject_init(struct nvme_ns *ns) {} +static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {} +static inline void nvme_should_fail(struct request *req) {} +#endif + static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl) { u32 val = 0; @@ -336,6 +361,8 @@ static inline void nvme_end_request(struct request *req, __le16 status, rq->status = 
le16_to_cpu(status) >> 1; rq->result = result; + /* inject error when permitted by fault injection framework */ + nvme_should_fail(req); blk_mq_complete_request(req); } -- cgit v1.2.3 From cf4182f3d06cec0546c69123f4104a2b69d07be2 Mon Sep 17 00:00:00 2001 From: Thomas Tai Date: Thu, 8 Feb 2018 13:38:30 -0500 Subject: Documentation: nvme: Documentation for nvme fault injection Add examples to show how to use nvme fault injection. Signed-off-by: Thomas Tai Reviewed-by: Eric Saint-Etienne Signed-off-by: Karl Volz Reviewed-by: Keith Busch Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe --- Documentation/fault-injection/fault-injection.txt | 8 ++ .../fault-injection/nvme-fault-injection.txt | 116 +++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 Documentation/fault-injection/nvme-fault-injection.txt diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt index de1dc35fe500..4d1b7b4ccfaf 100644 --- a/Documentation/fault-injection/fault-injection.txt +++ b/Documentation/fault-injection/fault-injection.txt @@ -36,6 +36,14 @@ o fail_function ALLOW_ERROR_INJECTION() macro, by setting debugfs entries under /sys/kernel/debug/fail_function. No boot option supported. +o NVMe fault injection + + inject NVMe status code and retry flag on devices permitted by setting + debugfs entries under /sys/kernel/debug/nvme*/fault_inject. The default + status code is NVME_SC_INVALID_OPCODE with no retry. The status code and + retry flag can be set via the debugfs. + + Configure fault-injection capabilities behavior ----------------------------------------------- diff --git a/Documentation/fault-injection/nvme-fault-injection.txt b/Documentation/fault-injection/nvme-fault-injection.txt new file mode 100644 index 000000000000..8fbf3bf60b62 --- /dev/null +++ b/Documentation/fault-injection/nvme-fault-injection.txt @@ -0,0 +1,116 @@ +NVMe Fault Injection +==================== +Linux's fault injection framework provides a systematic way to support +error injection via debugfs in the /sys/kernel/debug directory. When +enabled, the default NVME_SC_INVALID_OPCODE with no retry will be +injected into the nvme_end_request. Users can change the default status +code and no retry flag via the debugfs. The list of Generic Command +Status can be found in include/linux/nvme.h + +Following examples show how to inject an error into the nvme. + +First, enable CONFIG_FAULT_INJECTION_DEBUG_FS kernel config, +recompile the kernel. After booting up the kernel, do the +following. + +Example 1: Inject default status code with no retry +--------------------------------------------------- + +mount /dev/nvme0n1 /mnt +echo 1 > /sys/kernel/debug/nvme0n1/fault_inject/times +echo 100 > /sys/kernel/debug/nvme0n1/fault_inject/probability +cp a.file /mnt + +Expected Result: + +cp: cannot stat ‘/mnt/a.file’: Input/output error + +Message from dmesg: + +FAULT_INJECTION: forcing a failure. +name fault_inject, interval 1, probability 100, space 0, times 1 +CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.15.0-rc8+ #2 +Hardware name: innotek GmbH VirtualBox/VirtualBox, +BIOS VirtualBox 12/01/2006 +Call Trace: + + dump_stack+0x5c/0x7d + should_fail+0x148/0x170 + nvme_should_fail+0x2f/0x50 [nvme_core] + nvme_process_cq+0xe7/0x1d0 [nvme] + nvme_irq+0x1e/0x40 [nvme] + __handle_irq_event_percpu+0x3a/0x190 + handle_irq_event_percpu+0x30/0x70 + handle_irq_event+0x36/0x60 + handle_fasteoi_irq+0x78/0x120 + handle_irq+0xa7/0x130 + ? 
tick_irq_enter+0xa8/0xc0 + do_IRQ+0x43/0xc0 + common_interrupt+0xa2/0xa2 + +RIP: 0010:native_safe_halt+0x2/0x10 +RSP: 0018:ffffffff82003e90 EFLAGS: 00000246 ORIG_RAX: ffffffffffffffdd +RAX: ffffffff817a10c0 RBX: ffffffff82012480 RCX: 0000000000000000 +RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 +RBP: 0000000000000000 R08: 000000008e38ce64 R09: 0000000000000000 +R10: 0000000000000000 R11: 0000000000000000 R12: ffffffff82012480 +R13: ffffffff82012480 R14: 0000000000000000 R15: 0000000000000000 + ? __sched_text_end+0x4/0x4 + default_idle+0x18/0xf0 + do_idle+0x150/0x1d0 + cpu_startup_entry+0x6f/0x80 + start_kernel+0x4c4/0x4e4 + ? set_init_arg+0x55/0x55 + secondary_startup_64+0xa5/0xb0 + print_req_error: I/O error, dev nvme0n1, sector 9240 +EXT4-fs error (device nvme0n1): ext4_find_entry:1436: +inode #2: comm cp: reading directory lblock 0 + +Example 2: Inject default status code with retry +------------------------------------------------ + +mount /dev/nvme0n1 /mnt +echo 1 > /sys/kernel/debug/nvme0n1/fault_inject/times +echo 100 > /sys/kernel/debug/nvme0n1/fault_inject/probability +echo 1 > /sys/kernel/debug/nvme0n1/fault_inject/status +echo 0 > /sys/kernel/debug/nvme0n1/fault_inject/dont_retry + +cp a.file /mnt + +Expected Result: + +command success without error + +Message from dmesg: + +FAULT_INJECTION: forcing a failure. +name fault_inject, interval 1, probability 100, space 0, times 1 +CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.15.0-rc8+ #4 +Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 +Call Trace: + + dump_stack+0x5c/0x7d + should_fail+0x148/0x170 + nvme_should_fail+0x30/0x60 [nvme_core] + nvme_loop_queue_response+0x84/0x110 [nvme_loop] + nvmet_req_complete+0x11/0x40 [nvmet] + nvmet_bio_done+0x28/0x40 [nvmet] + blk_update_request+0xb0/0x310 + blk_mq_end_request+0x18/0x60 + flush_smp_call_function_queue+0x3d/0xf0 + smp_call_function_single_interrupt+0x2c/0xc0 + call_function_single_interrupt+0xa2/0xb0 + +RIP: 0010:native_safe_halt+0x2/0x10 +RSP: 0018:ffffc9000068bec0 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff04 +RAX: ffffffff817a10c0 RBX: ffff88011a3c9680 RCX: 0000000000000000 +RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 +RBP: 0000000000000001 R08: 000000008e38c131 R09: 0000000000000000 +R10: 0000000000000000 R11: 0000000000000000 R12: ffff88011a3c9680 +R13: ffff88011a3c9680 R14: 0000000000000000 R15: 0000000000000000 + ? __sched_text_end+0x4/0x4 + default_idle+0x18/0xf0 + do_idle+0x150/0x1d0 + cpu_startup_entry+0x6f/0x80 + start_secondary+0x187/0x1e0 + secondary_startup_64+0xa5/0xb0 -- cgit v1.2.3 From 9a915a5be7dc320743034a17394e08eb438baf33 Mon Sep 17 00:00:00 2001 From: Jianchao Wang Date: Mon, 12 Feb 2018 20:57:24 +0800 Subject: nvme-pci: quiesce IO queues prior to disabling device HMB accesses Quiesce IO queues prior to disabling device HMB accesses. A controller using HMB may rely on it to efficiently complete IO commands.
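To make the resulting ordering easier to see than in the interleaved hunks below, here is a minimal sketch of the shape nvme_dev_disable() takes after this patch; the helper name disable_sketch is made up for illustration, while the nvme_* calls are the driver's own as used in the diff:

static void disable_sketch(struct nvme_dev *dev, bool shutdown, bool dead)
{
	if (!dead && shutdown)
		nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);

	/* quiesce I/O queues before touching the HMB */
	nvme_stop_queues(&dev->ctrl);

	if (!dead) {
		/* no in-flight commands can rely on host memory any more */
		if (dev->host_mem_descs)
			nvme_set_host_mem(dev, 0);
		nvme_disable_io_queues(dev);
		nvme_disable_admin_queue(dev, shutdown);
	}
}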
Reviewed-by: Keith Busch Reviewed-by: Sagi Grimberg Signed-off-by: Jianchao Wang Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5933a5c732e8..aacc8e4b0051 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2196,7 +2196,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) if (!dead) { if (shutdown) nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); + } + + nvme_stop_queues(&dev->ctrl); + if (!dead) { /* * If the controller is still alive tell it to stop using the * host memory buffer. In theory the shutdown / reset should @@ -2205,11 +2209,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) */ if (dev->host_mem_descs) nvme_set_host_mem(dev, 0); - - } - nvme_stop_queues(&dev->ctrl); - - if (!dead) { nvme_disable_io_queues(dev); nvme_disable_admin_queue(dev, shutdown); } -- cgit v1.2.3 From 6f8e0d787e3727ed70116e3523f4ecb86887c000 Mon Sep 17 00:00:00 2001 From: Jianchao Wang Date: Mon, 12 Feb 2018 20:54:44 +0800 Subject: nvme: fix the dangerous reference of namespaces list nvme_remove_namespaces and nvme_remove_invalid_namespaces reference the ctrl->namespaces list w/o holding namespaces_mutex. It is ok to invoke nvme_ns_remove there, but what if there are others? To be safer, reference the ctrl->namespaces list under namespaces_mutex. Reviewed-by: Keith Busch Signed-off-by: Jianchao Wang Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f96b99356917..31f20f4643cf 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3098,11 +3098,18 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns, *next; + LIST_HEAD(rm_list); + mutex_lock(&ctrl->namespaces_mutex); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { if (ns->head->ns_id > nsid) - nvme_ns_remove(ns); + list_move_tail(&ns->list, &rm_list); } + mutex_unlock(&ctrl->namespaces_mutex); + + list_for_each_entry_safe(ns, next, &rm_list, list) + nvme_ns_remove(ns); + } static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) @@ -3202,6 +3209,7 @@ EXPORT_SYMBOL_GPL(nvme_queue_scan); void nvme_remove_namespaces(struct nvme_ctrl *ctrl) { struct nvme_ns *ns, *next; + LIST_HEAD(ns_list); /* * The dead states indicates the controller was not gracefully @@ -3212,7 +3220,11 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) if (ctrl->state == NVME_CTRL_DEAD) nvme_kill_queues(ctrl); - list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) + mutex_lock(&ctrl->namespaces_mutex); + list_splice_init(&ctrl->namespaces, &ns_list); + mutex_unlock(&ctrl->namespaces_mutex); + + list_for_each_entry_safe(ns, next, &ns_list, list) nvme_ns_remove(ns); } EXPORT_SYMBOL_GPL(nvme_remove_namespaces); -- cgit v1.2.3 From 765cc031cddde40bdc279e8e2697571c7956c54e Mon Sep 17 00:00:00 2001 From: Jianchao Wang Date: Mon, 12 Feb 2018 20:54:46 +0800 Subject: nvme: change namespaces_mutex to namespaces_rwsem namespaces_mutex is used to synchronize operations on the ctrl namespaces list. Most of the time, it is a read operation. On the other hand, there are many interfaces in nvme core that need this lock, such as nvme_wait_freeze, and even more interfaces will be added.
If we use a mutex here, a circular dependency could be introduced easily. For example:

  context A                      context B
  nvme_xxx                       nvme_xxx
  hold namespaces_mutex          require namespaces_mutex
  sync context B

So it is better to change it from a mutex to an rwsem. Reviewed-by: Keith Busch Signed-off-by: Jianchao Wang Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 64 +++++++++++++++++++++---------------------- drivers/nvme/host/multipath.c | 4 +-- drivers/nvme/host/nvme.h | 2 +- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 31f20f4643cf..ea99265565ae 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1125,13 +1125,13 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl) struct nvme_ns *ns, *next; LIST_HEAD(rm_list); - mutex_lock(&ctrl->namespaces_mutex); + down_write(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { if (ns->disk && nvme_revalidate_disk(ns->disk)) { list_move_tail(&ns->list, &rm_list); } } - mutex_unlock(&ctrl->namespaces_mutex); + up_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &rm_list, list) nvme_ns_remove(ns); @@ -2441,7 +2441,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) struct nvme_ns *ns; int ret; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); if (list_empty(&ctrl->namespaces)) { ret = -ENOTTY; goto out_unlock; @@ -2458,14 +2458,14 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) dev_warn(ctrl->device, "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); kref_get(&ns->kref); - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); ret = nvme_user_cmd(ctrl, ns, argp); nvme_put_ns(ns); return ret; out_unlock: - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); return ret; } @@ -2894,7 +2894,7 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns, *ret = NULL; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { if (ns->head->ns_id == nsid) { if (!kref_get_unless_zero(&ns->kref)) @@ -2905,7 +2905,7 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) if (ns->head->ns_id > nsid) break; } - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); return ret; } @@ -3016,9 +3016,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) __nvme_revalidate_disk(disk, id); - mutex_lock(&ctrl->namespaces_mutex); + down_write(&ctrl->namespaces_rwsem); list_add_tail(&ns->list, &ctrl->namespaces); - mutex_unlock(&ctrl->namespaces_mutex); + up_write(&ctrl->namespaces_rwsem); nvme_get_ctrl(ctrl); @@ -3072,9 +3072,9 @@ static void nvme_ns_remove(struct nvme_ns *ns) list_del_rcu(&ns->siblings); mutex_unlock(&ns->ctrl->subsys->lock); - mutex_lock(&ns->ctrl->namespaces_mutex); + down_write(&ns->ctrl->namespaces_rwsem); list_del_init(&ns->list); - mutex_unlock(&ns->ctrl->namespaces_mutex); + up_write(&ns->ctrl->namespaces_rwsem); synchronize_srcu(&ns->head->srcu); nvme_mpath_check_last_path(ns); @@ -3100,12 +3100,12 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, struct nvme_ns *ns, *next; LIST_HEAD(rm_list); - mutex_lock(&ctrl->namespaces_mutex); + down_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { if (ns->head->ns_id > nsid)
list_move_tail(&ns->list, &rm_list); } - mutex_unlock(&ctrl->namespaces_mutex); + up_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &rm_list, list) nvme_ns_remove(ns); @@ -3185,9 +3185,9 @@ static void nvme_scan_work(struct work_struct *work) } nvme_scan_ns_sequential(ctrl, nn); done: - mutex_lock(&ctrl->namespaces_mutex); + down_write(&ctrl->namespaces_rwsem); list_sort(NULL, &ctrl->namespaces, ns_cmp); - mutex_unlock(&ctrl->namespaces_mutex); + up_write(&ctrl->namespaces_rwsem); kfree(id); } @@ -3220,9 +3220,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl) if (ctrl->state == NVME_CTRL_DEAD) nvme_kill_queues(ctrl); - mutex_lock(&ctrl->namespaces_mutex); + down_write(&ctrl->namespaces_rwsem); list_splice_init(&ctrl->namespaces, &ns_list); - mutex_unlock(&ctrl->namespaces_mutex); + up_write(&ctrl->namespaces_rwsem); list_for_each_entry_safe(ns, next, &ns_list, list) nvme_ns_remove(ns); @@ -3411,7 +3411,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->state = NVME_CTRL_NEW; spin_lock_init(&ctrl->lock); INIT_LIST_HEAD(&ctrl->namespaces); - mutex_init(&ctrl->namespaces_mutex); + init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; ctrl->ops = ops; ctrl->quirks = quirks; @@ -3472,7 +3472,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); /* Forcibly unquiesce queues to avoid blocking dispatch */ if (ctrl->admin_q) @@ -3491,7 +3491,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) /* Forcibly unquiesce queues to avoid blocking dispatch */ blk_mq_unquiesce_queue(ns->queue); } - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_kill_queues); @@ -3499,10 +3499,10 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_mq_unfreeze_queue(ns->queue); - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_unfreeze); @@ -3510,13 +3510,13 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); if (timeout <= 0) break; } - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); @@ -3524,10 +3524,10 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_mq_freeze_queue_wait(ns->queue); - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_wait_freeze); @@ -3535,10 +3535,10 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_freeze_queue_start(ns->queue); - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_start_freeze); @@ -3546,10 +3546,10 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) 
blk_mq_quiesce_queue(ns->queue); - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_stop_queues); @@ -3557,10 +3557,10 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) blk_mq_unquiesce_queue(ns->queue); - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } EXPORT_SYMBOL_GPL(nvme_start_queues); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 7283d7149baf..affd67021b6f 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -44,12 +44,12 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; - mutex_lock(&ctrl->namespaces_mutex); + down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { if (ns->head->disk) kblockd_schedule_work(&ns->head->requeue_work); } - mutex_unlock(&ctrl->namespaces_mutex); + up_read(&ctrl->namespaces_rwsem); } static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 9a3d3540aa6a..29942b1892f7 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -141,7 +141,7 @@ struct nvme_ctrl { struct blk_mq_tag_set *tagset; struct blk_mq_tag_set *admin_tagset; struct list_head namespaces; - struct mutex namespaces_mutex; + struct rw_semaphore namespaces_rwsem; struct device ctrl_device; struct device *device; /* char device */ struct cdev cdev; -- cgit v1.2.3 From 70da6094a646f0f2d823e077614840cf21055580 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Mon, 26 Feb 2018 13:55:40 +0100 Subject: nvme: implement log page low/high offset and dwords MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NVMe 1.2.1 extends the get log page interface to include 64 bit offset and increases the number of dwords to 32 bits. Implement for future use. 
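As a quick illustration of how the extended fields are packed, here is a standalone sketch mirroring nvme_get_log_ext() in the hunk below, with a worked example; lower_32_bits()/upper_32_bits() are the kernel's helpers and are equivalent to the explicit masks the patch uses:

/*
 * For size = 16384 bytes: dwlen = 16384 / 4 - 1 = 4095 (0xfff),
 * so numdl = 0x0fff and numdu = 0. An offset of exactly 4 GiB
 * would land entirely in lpou.
 */
u32 dwlen = size / 4 - 1;

c.get_log_page.numdl = cpu_to_le16(dwlen & 0xffff);        /* low 16 bits of dword count */
c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);           /* high 16 bits */
c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));  /* log page offset, low dword */
c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));  /* log page offset, high dword */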
Signed-off-by: Matias Bjørling Reviewed-by: Johannes Thumshirn Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ea99265565ae..5c729ab51911 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -100,11 +100,6 @@ static struct class *nvme_subsys_class; static void nvme_ns_remove(struct nvme_ns *ns); static int nvme_revalidate_disk(struct gendisk *disk); -static __le32 nvme_get_log_dw10(u8 lid, size_t size) -{ - return cpu_to_le32((((size / 4) - 1) << 16) | lid); -} - int nvme_reset_ctrl(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) @@ -2218,18 +2213,35 @@ out_unlock: return ret; } -static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log, - size_t size) +static int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + u8 log_page, void *log, + size_t size, size_t offset) { struct nvme_command c = { }; + unsigned long dwlen = size / 4 - 1; + + c.get_log_page.opcode = nvme_admin_get_log_page; + + if (ns) + c.get_log_page.nsid = cpu_to_le32(ns->head->ns_id); + else + c.get_log_page.nsid = cpu_to_le32(NVME_NSID_ALL); - c.common.opcode = nvme_admin_get_log_page; - c.common.nsid = cpu_to_le32(NVME_NSID_ALL); - c.common.cdw10[0] = nvme_get_log_dw10(log_page, size); + c.get_log_page.lid = log_page; + c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); + c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); + c.get_log_page.lpol = cpu_to_le32(offset & ((1ULL << 32) - 1)); + c.get_log_page.lpou = cpu_to_le32(offset >> 32ULL); return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); } +static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log, + size_t size) +{ + return nvme_get_log_ext(ctrl, NULL, log_page, log, size, 0); +} + static int nvme_get_effects_log(struct nvme_ctrl *ctrl) { int ret; -- cgit v1.2.3 From 97c122233f73e91ceed5038e6e59fc5009305f7e Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 8 Mar 2018 14:50:32 -0700 Subject: nvme-pci: Add .get_address ctrl callback nvme-fabrics exports the controller address to sysfs, and we'd like to have parity with this feature for PCIe. This patch provides the appropriate callback and returns the controller address as the PCI domain:bus:device.function.
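With this wired up, the address should be readable the same way it already is for fabrics controllers. A hedged usage example, assuming a controller enumerated as nvme0 on bus 01 (both hypothetical):

cat /sys/class/nvme/nvme0/address
0000:01:00.0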
Signed-off-by: Keith Busch Reviewed-by: Sagi Grimberg Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index aacc8e4b0051..6aec86b8f8ab 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2414,6 +2414,13 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) return 0; } +static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) +{ + struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); + + return snprintf(buf, size, "%s", dev_name(&pdev->dev)); +} + static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .name = "pcie", .module = THIS_MODULE, @@ -2423,6 +2430,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .reg_read64 = nvme_pci_reg_read64, .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, + .get_address = nvme_pci_get_address, }; static int nvme_dev_map(struct nvme_dev *dev) -- cgit v1.2.3 From 77d0612da0e61cb2903ecd0be02444e4c958c672 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Sun, 11 Mar 2018 17:46:06 +0200 Subject: nvme: centralize ctrl removal prints nvme_delete_ctrl can be called from various contexts in parallel, and cause duplicated information prints, even though the specific context doesn't perform the actual removal. Instead, print the information when the actual removal occurs. Signed-off-by: Max Gurtovoy Reviewed-by: Johannes Thumshirn Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 3 +++ drivers/nvme/host/fc.c | 13 +++++-------- drivers/nvme/host/rdma.c | 4 ---- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 5c729ab51911..f29a07ef122f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -130,6 +130,9 @@ static void nvme_delete_ctrl_work(struct work_struct *work) struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, delete_work); + dev_info(ctrl->device, + "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); + flush_work(&ctrl->reset_work); nvme_stop_ctrl(ctrl); nvme_remove_namespaces(ctrl); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 7f51f8414b97..7edaa30cb61b 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -768,8 +768,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) */ if (nvme_reset_ctrl(&ctrl->ctrl)) { dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Couldn't schedule reset. " - "Deleting controller.\n", + "NVME-FC{%d}: Couldn't schedule reset.\n", ctrl->cnum); nvme_delete_ctrl(&ctrl->ctrl); } @@ -836,8 +835,7 @@ nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) /* if dev_loss_tmo==0, dev loss is immediate */ if (!portptr->dev_loss_tmo) { dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: controller connectivity lost. " - "Deleting controller.\n", + "NVME-FC{%d}: controller connectivity lost.\n", ctrl->cnum); nvme_delete_ctrl(&ctrl->ctrl); } else @@ -2882,14 +2880,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) if (portptr->port_state == FC_OBJSTATE_ONLINE) dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: Max reconnect attempts (%d) " - "reached. Removing controller\n", + "reached.\n", ctrl->cnum, ctrl->ctrl.nr_reconnects); else dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: dev_loss_tmo (%d) expired " - "while waiting for remoteport connectivity. 
" - "Removing controller\n", ctrl->cnum, - portptr->dev_loss_tmo); + "while waiting for remoteport connectivity.\n", + ctrl->cnum, portptr->dev_loss_tmo); WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); } } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 4d84a73ee12d..f5f460b8045c 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -899,7 +899,6 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) queue_delayed_work(nvme_wq, &ctrl->reconnect_work, ctrl->ctrl.opts->reconnect_delay * HZ); } else { - dev_info(ctrl->ctrl.device, "Removing controller...\n"); nvme_delete_ctrl(&ctrl->ctrl); } } @@ -2031,9 +2030,6 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data) list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { if (ctrl->device->dev != ib_device) continue; - dev_info(ctrl->ctrl.device, - "Removing ctrl: NQN \"%s\", addr %pISp\n", - ctrl->ctrl.opts->subsysnqn, &ctrl->addr); nvme_delete_ctrl(&ctrl->ctrl); } mutex_unlock(&nvme_rdma_ctrl_mutex); -- cgit v1.2.3 From 467c77d4cbefaaf65e2f44fe102d543a52fcae5b Mon Sep 17 00:00:00 2001 From: Jarosław Janik Date: Sun, 11 Mar 2018 19:51:56 +0100 Subject: nvme-pci: disable APST for Samsung NVMe SSD 960 EVO + ASUS PRIME Z370-A MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Yet another "incompatible" Samsung NVMe SSD 960 EVO and Asus motherboard combination. 960 EVO device disappears from PCIe bus within few minutes after boot-up when APST is in use and never gets back. Forcing NVME_QUIRK_NO_APST is the only way to make this drive work with this particular motherboard. NVME_QUIRK_NO_DEEPEST_PS doesn't work, upgrading motherboard's BIOS didn't help either. Since this is a desktop motherboard, the only drawback of not using APST is increased device temperature. Signed-off-by: Jarosław Janik Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6aec86b8f8ab..cef5ce851a92 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2467,10 +2467,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { /* * Samsung SSD 960 EVO drops off the PCIe bus after system - * suspend on a Ryzen board, ASUS PRIME B350M-A. + * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as + * within few minutes after bootup on a Coffee Lake board - + * ASUS PRIME Z370-A */ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && - dmi_match(DMI_BOARD_NAME, "PRIME B350M-A")) + (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || + dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) return NVME_QUIRK_NO_APST; } -- cgit v1.2.3 From cf25809bec2c7df4b45df5b2196845d9a4a3c89b Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 13 Mar 2018 09:48:07 -0700 Subject: nvme_fc: fix ctrl create failures racing with workq items If there are errors during initial controller create, the transport will teardown the partially initialized controller struct and free the ctlr memory. Trouble is - most of those errors can occur due to asynchronous events happening such io timeouts and subsystem connectivity failures. Those failures invoke async workq items to reset the controller and attempt reconnect. Those may be in progress as the main thread frees the ctrl memory, resulting in NULL ptr oops. 
Prevent this from happening by having the main ctrl failure thread change state to DELETING, followed by synchronously cancelling any pending queued work items. The change of state will prevent the scheduling of resets or reconnect events. Signed-off-by: James Smart Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/fc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 7edaa30cb61b..56023878e3c0 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3123,6 +3123,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, } if (ret) { + nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); + cancel_work_sync(&ctrl->ctrl.reset_work); + cancel_delayed_work_sync(&ctrl->connect_work); + /* couldn't schedule retry - fail out */ dev_err(ctrl->ctrl.device, "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum); -- cgit v1.2.3 From 041018c634e44d8697879b241e17a9466f2e83ed Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 12 Mar 2018 09:32:22 -0700 Subject: nvme_fc: io timeout should defer abort to ctrl reset The current nvme_fc code, when an io times out, will abort the io on the fc link, then call the error recovery routine to reset the controller. It is during the reset of the controller that the transport will wait for all ios to be aborted before sending a Disconnect LS to the target. However, the reset routine only waits for the io which it generates the abort for to complete. Any io that was aborted just prior to the reset isn't in its list to wait for. Thus the Disconnect is getting sent before the aborts have completed. Correct by removing the abort in the timeout handler. The reset will generate the abort. At that point the timeout handler can be simplified to request the reset (via the error handler) and restart the timeout timer. Also fixes a small typo in a comment in the reset handler. Signed-off-by: James Smart Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/fc.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 56023878e3c0..2318d1255adc 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2074,20 +2074,10 @@ nvme_fc_timeout(struct request *rq, bool reserved) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); struct nvme_fc_ctrl *ctrl = op->ctrl; - int ret; - - if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || - atomic_read(&op->state) == FCPOP_STATE_ABORTED) - return BLK_EH_RESET_TIMER; - - ret = __nvme_fc_abort_op(ctrl, op); - if (ret) - /* io wasn't active to abort */ - return BLK_EH_NOT_HANDLED; /* * we can't individually ABTS an io without affecting the queue, - * thus killing the queue, adn thus the association. + * thus killing the queue, and thus the association. * So resolve by performing a controller reset, which will stop * the host/io stack, terminate the association on the link, * and recreate an association on the link. -- cgit v1.2.3 From b12740d316fa89f3f6191b71f986cf3b9383d379 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 28 Feb 2018 14:49:10 -0800 Subject: nvme_fc: fix abort race on teardown with lld reject Another abort race: An io request is started, becomes active, and is attempted to be started with the lldd. At the same time the controller is stopped/torn down and an iterator is run to abort the ios. As the io is active, it is added to the outstanding aborted io count.
However, on the original io request thread, the driver ends up rejecting the io due to the condition that induced the controller teardown. The driver reject path didn't check whether it was in the outstanding io count. This left the count outstanding, stopping controller teardown. Correct by, in the driver reject case, setting the state to inactive and checking whether it was in the outstanding io count. Signed-off-by: James Smart Reviewed-by: Sagi Grimberg Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/fc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 2318d1255adc..49e2ef2e83a5 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2179,7 +2179,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; struct nvme_command *sqe = &cmdiu->sqe; u32 csn; - int ret; + int ret, opstate; /* * before attempting to send the io, check to see if we believe @@ -2257,6 +2257,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, queue->lldd_handle, &op->fcp_req); if (ret) { + opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); + __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); + if (!(op->flags & FCOP_FLAGS_AEN)) nvme_fc_unmap_data(ctrl, op->rq, op); -- cgit v1.2.3 From 0cdd5fca876b1e9c56ca01186ba650b680248b35 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 5 Mar 2018 20:55:49 -0800 Subject: nvme_fc: on remoteport reuse, set new nport_id and role. When reattaching to a removed remoteport that has not yet been fully deleted as it's waiting for reconnect timeouts, be sure to re-set the port's nport id and role. Signed-off-by: James Smart Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/fc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 49e2ef2e83a5..0676d4497248 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -588,6 +588,8 @@ nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport, return ERR_PTR(-ESTALE); } + rport->remoteport.port_role = pinfo->port_role; + rport->remoteport.port_id = pinfo->port_id; rport->remoteport.port_state = FC_OBJSTATE_ONLINE; rport->dev_loss_end = 0; -- cgit v1.2.3 From 9d625f7792875e8119ac3f364f3fd71b8bfc1294 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 28 Feb 2018 14:49:11 -0800 Subject: nvmet_fc: prevent new io rqsts in possible isr completions When a bio completion calls back into the transport for a back-end io device, the request completion path can free the transport io job structure allowing it to be reused for other operations. The transport has a defer_rcv queue which holds temporary cmd rcv ops while waiting for io job structures. When the job frees, if there's a cmd waiting, it is picked up and submitted for processing, which can call back out to the bio path if it's a read. Unfortunately, what is unknown is the context of the original bio done call, and it may be in a state (softirq) that is not compatible with submitting the new bio in the same calling sequence. This is especially true when using scsi back-end devices as scsi is in softirq when it makes the done call. Correct by scheduling the io to be started via workq rather than calling the start new io path inline to the original bio done path.
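Reduced to its essentials, the shape of the fix is the standard defer-to-workqueue pattern, sketched here with the names the patch itself introduces in the hunks below: the deferred command is resubmitted from a work item, i.e. process context, instead of inline from the possibly-softirq completion path.

/* init time: one work item per transport io job structure */
INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);

/* completion path: defer instead of starting the new io inline */
queue_work(queue->work_q, &fod->defer_work);

/* worker: a safe context in which to kick off the deferred command */
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}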
Signed-off-by: James Smart Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/target/fc.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 9b39a6cb1935..9f80f98d81d2 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -87,6 +87,7 @@ struct nvmet_fc_fcp_iod { struct nvmet_req req; struct work_struct work; struct work_struct done_work; + struct work_struct defer_work; struct nvmet_fc_tgtport *tgtport; struct nvmet_fc_tgt_queue *queue; @@ -224,6 +225,7 @@ static DEFINE_IDA(nvmet_fc_tgtport_cnt); static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work); static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work); +static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); @@ -429,6 +431,7 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, for (i = 0; i < queue->sqsize; fod++, i++) { INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work); INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work); + INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); fod->tgtport = tgtport; fod->queue = queue; fod->active = false; @@ -511,6 +514,17 @@ nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, nvmet_fc_handle_fcp_rqst(tgtport, fod); } +static void +nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work) +{ + struct nvmet_fc_fcp_iod *fod = + container_of(work, struct nvmet_fc_fcp_iod, defer_work); + + /* Submit deferred IO for processing */ + nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); + +} + static void nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, struct nvmet_fc_fcp_iod *fod) @@ -568,13 +582,12 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, /* inform LLDD IO is now being processed */ tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); - /* Submit deferred IO for processing */ - nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); - /* * Leave the queue lookup get reference taken when * fod was originally allocated. 
*/ + + queue_work(queue->work_q, &fod->defer_work); } static int -- cgit v1.2.3 From be9bddeb0a3ff017d9f56db99d67518c8a1b29cf Mon Sep 17 00:00:00 2001 From: Israel Rukshin Date: Wed, 14 Mar 2018 10:22:44 +0000 Subject: nvmet-rdma: Remove unused queue state Signed-off-by: Israel Rukshin Reviewed-by: Max Gurtovoy Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/target/rdma.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index d7831372e1f9..a1ba218326ad 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -77,7 +77,6 @@ enum nvmet_rdma_queue_state { NVMET_RDMA_Q_CONNECTING, NVMET_RDMA_Q_LIVE, NVMET_RDMA_Q_DISCONNECTING, - NVMET_RDMA_IN_DEVICE_REMOVAL, }; struct nvmet_rdma_queue { @@ -942,12 +941,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w) container_of(w, struct nvmet_rdma_queue, release_work); struct rdma_cm_id *cm_id = queue->cm_id; struct nvmet_rdma_device *dev = queue->dev; - enum nvmet_rdma_queue_state state = queue->state; nvmet_rdma_free_queue(queue); - if (state != NVMET_RDMA_IN_DEVICE_REMOVAL) - rdma_destroy_id(cm_id); + rdma_destroy_id(cm_id); kref_put(&dev->ref, nvmet_rdma_free_dev); } @@ -1209,7 +1206,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) case NVMET_RDMA_Q_CONNECTING: case NVMET_RDMA_Q_LIVE: queue->state = NVMET_RDMA_Q_DISCONNECTING; - case NVMET_RDMA_IN_DEVICE_REMOVAL: disconnect = true; break; case NVMET_RDMA_Q_DISCONNECTING: -- cgit v1.2.3 From e1a2ee249b19c3a65de893150d2045099c693bc3 Mon Sep 17 00:00:00 2001 From: Israel Rukshin Date: Wed, 14 Mar 2018 10:22:45 +0000 Subject: nvmet-rdma: Fix use after free in nvmet_rdma_cm_handler() We free nvmet rdma queues while handling rdma_cm events. In order to avoid this we destroy the qp and the queue after destroying the cm_id which guarantees that all rdma_cm events are done. 
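The resulting teardown order, shown as a minimal sketch mirroring the hunks below (the local qp variable is the patch's own): destroying the cm_id first acts as a barrier for rdma_cm event delivery, so the qp can then be destroyed without racing a concurrent event handler.

struct ib_qp *qp = queue->cm_id->qp;

ib_drain_qp(qp);                /* flush posted work requests */
rdma_destroy_id(queue->cm_id);  /* no rdma_cm events can arrive after this */
ib_destroy_qp(qp);              /* qp outlived the cm_id; destroy it directly */
ib_free_cq(queue->cq);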
Signed-off-by: Israel Rukshin Reviewed-by: Max Gurtovoy Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/target/rdma.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index a1ba218326ad..aa8068fce0dd 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -913,8 +913,11 @@ err_destroy_cq: static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { - ib_drain_qp(queue->cm_id->qp); - rdma_destroy_qp(queue->cm_id); + struct ib_qp *qp = queue->cm_id->qp; + + ib_drain_qp(qp); + rdma_destroy_id(queue->cm_id); + ib_destroy_qp(qp); ib_free_cq(queue->cq); } @@ -939,13 +942,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w) { struct nvmet_rdma_queue *queue = container_of(w, struct nvmet_rdma_queue, release_work); - struct rdma_cm_id *cm_id = queue->cm_id; struct nvmet_rdma_device *dev = queue->dev; nvmet_rdma_free_queue(queue); - rdma_destroy_id(cm_id); - kref_put(&dev->ref, nvmet_rdma_free_dev); } @@ -1150,8 +1150,11 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, } ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); - if (ret) - goto release_queue; + if (ret) { + schedule_work(&queue->release_work); + /* Destroying rdma_cm id is not needed here */ + return 0; + } mutex_lock(&nvmet_rdma_queue_mutex); list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); @@ -1159,8 +1162,6 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, return 0; -release_queue: - nvmet_rdma_free_queue(queue); put_device: kref_put(&ndev->ref, nvmet_rdma_free_dev); @@ -1318,13 +1319,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: - /* - * We might end up here when we already freed the qp - * which means queue release sequence is in progress, - * so don't get in the way... - */ - if (queue) - nvmet_rdma_queue_disconnect(queue); + nvmet_rdma_queue_disconnect(queue); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: ret = nvmet_rdma_device_removal(cm_id, queue); -- cgit v1.2.3 From a3dd7d0022c347207ae931c753a6dc3e6e8fcbc1 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Wed, 28 Feb 2018 13:12:38 +0200 Subject: nvmet-rdma: Don't flush system_wq by default during remove_one The .remove_one function is called for any ib_device removal. In case the removed device has no reference in our driver, there is no need to flush the system work queue. 
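The early-out lookup this adds (and which the companion nvme-rdma patch further below mirrors) is the usual check-membership-under-lock pattern; a sketch using the names from the hunk below:

bool found = false;

mutex_lock(&device_list_mutex);
list_for_each_entry(ndev, &device_list, entry) {
	if (ndev->device == ib_device) {
		found = true;
		break;
	}
}
mutex_unlock(&device_list_mutex);

if (!found)
	return;	/* the removed device was never ours, nothing to tear down */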
Reviewed-by: Israel Rukshin Signed-off-by: Max Gurtovoy Reviewed-by: Sagi Grimberg Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/target/rdma.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index aa8068fce0dd..a59263d6d158 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1469,8 +1469,25 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = { static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) { struct nvmet_rdma_queue *queue, *tmp; + struct nvmet_rdma_device *ndev; + bool found = false; + + mutex_lock(&device_list_mutex); + list_for_each_entry(ndev, &device_list, entry) { + if (ndev->device == ib_device) { + found = true; + break; + } + } + mutex_unlock(&device_list_mutex); + + if (!found) + return; - /* Device is being removed, delete all queues using this device */ + /* + * IB Device that is used by nvmet controllers is being removed, + * delete all queues using this device. + */ mutex_lock(&nvmet_rdma_queue_mutex); list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, queue_list) { -- cgit v1.2.3 From 9bad0404ecd7594265cef04e176adeaa4ffbca4a Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Wed, 28 Feb 2018 13:12:39 +0200 Subject: nvme-rdma: Don't flush delete_wq by default during remove_one The .remove_one function is called for any ib_device removal. In case the removed device has no reference in our driver, there is no need to flush the work queue. Reviewed-by: Israel Rukshin Signed-off-by: Max Gurtovoy Reviewed-by: Sagi Grimberg Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/rdma.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f5f460b8045c..250b2778bb97 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -2024,6 +2024,20 @@ static struct nvmf_transport_ops nvme_rdma_transport = { static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data) { struct nvme_rdma_ctrl *ctrl; + struct nvme_rdma_device *ndev; + bool found = false; + + mutex_lock(&device_list_mutex); + list_for_each_entry(ndev, &device_list, entry) { + if (ndev->dev == ib_device) { + found = true; + break; + } + } + mutex_unlock(&device_list_mutex); + + if (!found) + return; /* Delete all controllers using this device */ mutex_lock(&nvme_rdma_ctrl_mutex); -- cgit v1.2.3 From 2079699c10c8c60a9572540c2f77d045abf036eb Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 19 Mar 2018 10:53:50 -0600 Subject: nvme: Skip checking heads without namespaces If a task is holding a reference to a namespace on a removed controller, the head will not be released. If the same controller is added again later, its namespaces may not be successfully added. Instead, the user will see kernel message "Duplicate IDs for nsid ". This patch fixes that by skipping heads that don't have namespaces when considering if a new namespace is safe to add. 
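Concretely, the validation loop becomes the following (a sketch of the one-line hunk below in context): a head whose namespace list is empty belongs to a removed controller and is only pinned by a stale reference, so it no longer vetoes the incoming namespace.

list_for_each_entry(h, &subsys->nsheads, entry) {
	if (nvme_ns_ids_valid(&new->ids) &&
	    !list_empty(&h->list) &&	/* skip heads with no namespaces */
	    nvme_ns_ids_equal(&new->ids, &h->ids))
		return -EINVAL;
}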
Reported-by: Alex Gagniuc Cc: stable@vger.kernel.org Signed-off-by: Keith Busch Reviewed-by: Max Gurtovoy Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f29a07ef122f..7811b4886e63 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2809,6 +2809,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys, list_for_each_entry(h, &subsys->nsheads, entry) { if (nvme_ns_ids_valid(&new->ids) && + !list_empty(&h->list) && nvme_ns_ids_equal(&new->ids, &h->ids)) return -EINVAL; } -- cgit v1.2.3 From 187c0832ee80250036adb386b5ffa8f4bcb0ff1e Mon Sep 17 00:00:00 2001 From: Nitzan Carmi Date: Tue, 20 Mar 2018 11:07:29 +0000 Subject: nvme-rdma: Allow DELETING state change failure in error_recovery While error recovery is ongoing, it is OK to move ctrl to DELETING state (from concurrent delete_work). Thus we don't need a warning for that case. Signed-off-by: Nitzan Carmi Reviewed-by: Max Gurtovoy Reviewed-by: Sagi Grimberg Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/rdma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 250b2778bb97..5e731f2c329c 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -973,8 +973,8 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) nvme_start_queues(&ctrl->ctrl); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { - /* state change failure should never happen */ - WARN_ON_ONCE(1); + /* state change failure is ok if we're in DELETING state */ + WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); return; } -- cgit v1.2.3 From b435ecea2a4d0b5cd5be2c5497c3461435f3f3a7 Mon Sep 17 00:00:00 2001 From: Nitzan Carmi Date: Tue, 20 Mar 2018 11:07:30 +0000 Subject: nvme: Add .stop_ctrl to nvme ctrl ops For consistency reasons, any fabric-specific work (e.g. error recovery/reconnect) should be canceled in nvme_stop_ctrl, like all other pending NVMe work (e.g. scan, keep alive). The patch simplifies the logic of the code: rather than relying on a vague demand that each fabric flush its private workqueues at the beginning of the .delete_ctrl op, the core now invokes the new .stop_ctrl callback from nvme_stop_ctrl.
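The callback follows the usual optional-op convention, so transports without fabric-specific work items need no stub. A sketch of the core-side call, with the surrounding flush calls elided in a comment; nvme_rdma_stop_ctrl is the rdma implementation wired up in the hunks below:

void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	/* ... flush async event and scan work, cancel fw activation ... */
	if (ctrl->ops->stop_ctrl)
		ctrl->ops->stop_ctrl(ctrl);	/* e.g. nvme_rdma_stop_ctrl */
}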
Signed-off-by: Nitzan Carmi Reviewed-by: Max Gurtovoy Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 2 ++ drivers/nvme/host/nvme.h | 1 + drivers/nvme/host/rdma.c | 12 +++++++++--- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7811b4886e63..ad99dd76dcd2 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3370,6 +3370,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) flush_work(&ctrl->async_event_work); flush_work(&ctrl->scan_work); cancel_work_sync(&ctrl->fw_act_work); + if (ctrl->ops->stop_ctrl) + ctrl->ops->stop_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_stop_ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 29942b1892f7..741e3c79bbe9 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -313,6 +313,7 @@ struct nvme_ctrl_ops { void (*delete_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); int (*reinit_request)(void *data, struct request *rq); + void (*stop_ctrl)(struct nvme_ctrl *ctrl); }; #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 5e731f2c329c..758537e9ba07 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -867,6 +867,14 @@ out_free_io_queues: return ret; } +static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) +{ + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); + + cancel_work_sync(&ctrl->err_work); + cancel_delayed_work_sync(&ctrl->reconnect_work); +} + static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); @@ -1718,9 +1726,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { - cancel_work_sync(&ctrl->err_work); - cancel_delayed_work_sync(&ctrl->reconnect_work); - if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, @@ -1798,6 +1803,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, .get_address = nvmf_get_address, + .stop_ctrl = nvme_rdma_stop_ctrl, }; static inline bool -- cgit v1.2.3 From f871749a9fa40a1c2d09aaf66776e32a0a638881 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Tue, 20 Mar 2018 14:20:41 +0200 Subject: nvmet: move device_uuid configfs attr definition to suitable place Signed-off-by: Max Gurtovoy Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/target/configfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index e6b2d2af81b6..7780a7210acb 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -333,13 +333,13 @@ out_unlock: return ret ? 
ret : count; } +CONFIGFS_ATTR(nvmet_ns_, device_uuid); + static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page) { return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid); } -CONFIGFS_ATTR(nvmet_ns_, device_uuid); - static ssize_t nvmet_ns_device_nguid_store(struct config_item *item, const char *page, size_t count) { -- cgit v1.2.3 From a5d18612295a0556bc66e8dddb19515f262c3612 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 20 Mar 2018 20:41:34 +0100 Subject: nvmet: refactor configfs transport type handling Have a common table of mappings from numerical transport ids to names, and zero the transport specific area in common code in nvmet_addr_trtype_store. Signed-off-by: Christoph Hellwig Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/target/configfs.c | 61 ++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 7780a7210acb..ad9ff27234b5 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -23,6 +23,15 @@ static const struct config_item_type nvmet_host_type; static const struct config_item_type nvmet_subsys_type; +static const struct nvmet_transport_name { + u8 type; + const char *name; +} nvmet_transport_names[] = { + { NVMF_TRTYPE_RDMA, "rdma" }, + { NVMF_TRTYPE_FC, "fc" }, + { NVMF_TRTYPE_LOOP, "loop" }, +}; + /* * nvmet_port Generic ConfigFS definitions. * Used in any place in the ConfigFS tree that refers to an address. @@ -208,43 +217,30 @@ CONFIGFS_ATTR(nvmet_, addr_trsvcid); static ssize_t nvmet_addr_trtype_show(struct config_item *item, char *page) { - switch (to_nvmet_port(item)->disc_addr.trtype) { - case NVMF_TRTYPE_RDMA: - return sprintf(page, "rdma\n"); - case NVMF_TRTYPE_LOOP: - return sprintf(page, "loop\n"); - case NVMF_TRTYPE_FC: - return sprintf(page, "fc\n"); - default: - return sprintf(page, "\n"); + struct nvmet_port *port = to_nvmet_port(item); + int i; + + for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) { + if (port->disc_addr.trtype != nvmet_transport_names[i].type) + continue; + return sprintf(page, "%s\n", nvmet_transport_names[i].name); } + + return sprintf(page, "\n"); } static void nvmet_port_init_tsas_rdma(struct nvmet_port *port) { - port->disc_addr.trtype = NVMF_TRTYPE_RDMA; - memset(&port->disc_addr.tsas.rdma, 0, NVMF_TSAS_SIZE); port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED; port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED; port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM; } -static void nvmet_port_init_tsas_loop(struct nvmet_port *port) -{ - port->disc_addr.trtype = NVMF_TRTYPE_LOOP; - memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); -} - -static void nvmet_port_init_tsas_fc(struct nvmet_port *port) -{ - port->disc_addr.trtype = NVMF_TRTYPE_FC; - memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); -} - static ssize_t nvmet_addr_trtype_store(struct config_item *item, const char *page, size_t count) { struct nvmet_port *port = to_nvmet_port(item); + int i; if (port->enabled) { pr_err("Cannot modify address while enabled\n"); @@ -252,17 +248,18 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item, return -EACCES; } - if (sysfs_streq(page, "rdma")) { - nvmet_port_init_tsas_rdma(port); - } else if (sysfs_streq(page, "loop")) { - nvmet_port_init_tsas_loop(port); - } else if (sysfs_streq(page, "fc")) { - nvmet_port_init_tsas_fc(port); - } else { - pr_err("Invalid value '%s' for trtype\n", page); 
- return -EINVAL; + for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) { + if (sysfs_streq(page, nvmet_transport_names[i].name)) + goto found; } + pr_err("Invalid value '%s' for trtype\n", page); + return -EINVAL; +found: + memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE); + port->disc_addr.trtype = nvmet_transport_names[i].type; + if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) + nvmet_port_init_tsas_rdma(port); return count; } -- cgit v1.2.3 From e929f06d9eaab4dba14e730ef18aa85b76465db9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 20 Mar 2018 20:41:35 +0100 Subject: nvmet: constify struct nvmet_fabrics_ops Signed-off-by: Christoph Hellwig Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/target/core.c | 12 ++++++------ drivers/nvme/target/fc.c | 4 ++-- drivers/nvme/target/loop.c | 4 ++-- drivers/nvme/target/nvmet.h | 10 +++++----- drivers/nvme/target/rdma.c | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index a78029e4e5f4..e95424f172fd 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -18,7 +18,7 @@ #include "nvmet.h" -static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; +static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; static DEFINE_IDA(cntlid_ida); /* @@ -137,7 +137,7 @@ static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, schedule_work(&ctrl->async_event_work); } -int nvmet_register_transport(struct nvmet_fabrics_ops *ops) +int nvmet_register_transport(const struct nvmet_fabrics_ops *ops) { int ret = 0; @@ -152,7 +152,7 @@ int nvmet_register_transport(struct nvmet_fabrics_ops *ops) } EXPORT_SYMBOL_GPL(nvmet_register_transport); -void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops) +void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops) { down_write(&nvmet_config_sem); nvmet_transports[ops->type] = NULL; @@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(nvmet_unregister_transport); int nvmet_enable_port(struct nvmet_port *port) { - struct nvmet_fabrics_ops *ops; + const struct nvmet_fabrics_ops *ops; int ret; lockdep_assert_held(&nvmet_config_sem); @@ -195,7 +195,7 @@ int nvmet_enable_port(struct nvmet_port *port) void nvmet_disable_port(struct nvmet_port *port) { - struct nvmet_fabrics_ops *ops; + const struct nvmet_fabrics_ops *ops; lockdep_assert_held(&nvmet_config_sem); @@ -500,7 +500,7 @@ int nvmet_sq_init(struct nvmet_sq *sq) EXPORT_SYMBOL_GPL(nvmet_sq_init); bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops) + struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops) { u8 flags = req->cmd->common.flags; u16 status; diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 9f80f98d81d2..33ee8d3145f8 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1563,7 +1563,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req); -static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; +static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; static void nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq) @@ -2518,7 +2518,7 @@ nvmet_fc_remove_port(struct nvmet_port *port) /* nothing to do */ } -static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { +static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_FC, .msdbd = 1, diff --git 
a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 861d1509b22b..a350765d2d5c 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -71,7 +71,7 @@ static DEFINE_MUTEX(nvme_loop_ctrl_mutex); static void nvme_loop_queue_response(struct nvmet_req *nvme_req); static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl); -static struct nvmet_fabrics_ops nvme_loop_ops; +static const struct nvmet_fabrics_ops nvme_loop_ops; static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue) { @@ -675,7 +675,7 @@ static void nvme_loop_remove_port(struct nvmet_port *port) nvmet_loop_port = NULL; } -static struct nvmet_fabrics_ops nvme_loop_ops = { +static const struct nvmet_fabrics_ops nvme_loop_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_LOOP, .add_port = nvme_loop_add_port, diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 40afb5d6ed91..15fd84ab21f8 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -130,7 +130,7 @@ struct nvmet_ctrl { struct delayed_work ka_work; struct work_struct fatal_err_work; - struct nvmet_fabrics_ops *ops; + const struct nvmet_fabrics_ops *ops; char subsysnqn[NVMF_NQN_FIELD_LEN]; char hostnqn[NVMF_NQN_FIELD_LEN]; @@ -233,7 +233,7 @@ struct nvmet_req { struct nvmet_port *port; void (*execute)(struct nvmet_req *req); - struct nvmet_fabrics_ops *ops; + const struct nvmet_fabrics_ops *ops; }; static inline void nvmet_set_status(struct nvmet_req *req, u16 status) @@ -269,7 +269,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req); u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req); bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops); + struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); void nvmet_req_uninit(struct nvmet_req *req); void nvmet_req_execute(struct nvmet_req *req); void nvmet_req_complete(struct nvmet_req *req, u16 status); @@ -303,8 +303,8 @@ void nvmet_ns_disable(struct nvmet_ns *ns); struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid); void nvmet_ns_free(struct nvmet_ns *ns); -int nvmet_register_transport(struct nvmet_fabrics_ops *ops); -void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops); +int nvmet_register_transport(const struct nvmet_fabrics_ops *ops); +void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops); int nvmet_enable_port(struct nvmet_port *port); void nvmet_disable_port(struct nvmet_port *port); diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index a59263d6d158..52e0c5d579a7 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -136,7 +136,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); -static struct nvmet_fabrics_ops nvmet_rdma_ops; +static const struct nvmet_fabrics_ops nvmet_rdma_ops; /* XXX: really should move to a generic header sooner or later.. 
*/ static inline u32 get_unaligned_le24(const u8 *p) @@ -1453,7 +1453,7 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, } } -static struct nvmet_fabrics_ops nvmet_rdma_ops = { +static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_RDMA, .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE, -- cgit v1.2.3 From d558fb51ad3dc7d5f1287d55d0f2e0646af36253 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Wed, 21 Mar 2018 20:27:07 +0100 Subject: nvme: make nvme_get_log_ext non-static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable the lightnvm integration to use the nvme_get_log_ext() function. Signed-off-by: Matias Bjørling Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 2 +- drivers/nvme/host/nvme.h | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ad99dd76dcd2..9ee919422669 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2216,7 +2216,7 @@ out_unlock: return ret; } -static int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, +int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 log_page, void *log, size_t size, size_t offset) { diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 741e3c79bbe9..c393e4b56f39 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -429,6 +429,9 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl); +int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + u8 log_page, void *log, size_t size, size_t offset); + extern const struct attribute_group nvme_ns_id_attr_group; extern const struct block_device_operations nvme_ns_head_ops; -- cgit v1.2.3 From bc56e2cafa3f80954a278d74bd18349ac3cb8fa5 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Mon, 26 Mar 2018 16:06:24 +0200 Subject: block, bfq: lower-bound the estimated peak rate to 1 If a storage device handled by BFQ happens to be slower than 7.5 KB/s for a certain amount of time (in the order of a second), then the estimated peak rate of the device, maintained in BFQ, becomes equal to 0. The reason is the limited precision with which the rate is represented (details on the range of representable values in the comments introduced by this commit). This leads to a division-by-zero error where the estimated peak rate is used as divisor. Such a type of failure has been reported in [1]. This commit addresses this issue by: 1. Lower-bounding the estimated peak rate to 1 2. Adding and improving comments on the range of rates representable [1] https://www.spinics.net/lists/kernel/msg2739205.html Signed-off-by: Konstantin Khlebnikov Signed-off-by: Paolo Valente Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 25 ++++++++++++++++++++++++- block/bfq-iosched.h | 2 +- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index aeca22d91101..f0ecd98509d8 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -201,7 +201,20 @@ static struct kmem_cache *bfq_pool; /* Target observation time interval for a peak-rate update (ns) */ #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC -/* Shift used for peak rate fixed precision calculations. */ +/* + * Shift used for peak-rate fixed precision calculations. 
+ * With + * - the current shift: 16 positions + * - the current type used to store rate: u32 + * - the current unit of measure for rate: [sectors/usec], or, more precisely, + * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift, + * the range of rates that can be stored is + * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec = + * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec = + * [15, 65G] sectors/sec + * Which, assuming a sector size of 512B, corresponds to a range of + * [7.5K, 33T] B/sec + */ #define BFQ_RATE_SHIFT 16 /* @@ -2637,6 +2650,16 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) rate /= divisor; /* smoothing constant alpha = 1/divisor */ bfqd->peak_rate += rate; + + /* + * For a very slow device, bfqd->peak_rate can reach 0 (see + * the minimum representable values reported in the comments + * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid + * divisions by zero where bfqd->peak_rate is used as a + * divisor. + */ + bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); + update_thr_responsiveness_params(bfqd); reset_computation: diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 350c39ae2896..ae2f3dadec44 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -499,7 +499,7 @@ struct bfq_data { u64 delta_from_first; /* * Current estimate of the device peak rate, measured in - * [BFQ_RATE_SHIFT * sectors/usec]. The left-shift by + * [(sectors/usec) / 2^BFQ_RATE_SHIFT]. The left-shift by * BFQ_RATE_SHIFT is performed to increase precision in * fixed-point calculations. */ -- cgit v1.2.3 From 2d1d4c1e591fd40bd7dafd868a249d7d00e215d5 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 26 Mar 2018 21:39:11 -0700 Subject: loop: don't call into filesystem while holding lo_ctl_mutex We hit an issue where a loop device on NFS was stuck in loop_get_status() doing vfs_getattr() after the NFS server died, which caused a pile-up of uninterruptible processes waiting on lo_ctl_mutex. There's no reason to hold this lock while we wait on the filesystem; let's drop it so that other processes can do their thing. We need to grab a reference on lo_backing_file while we use it, and we can get rid of the check on lo_device, which has been unnecessary since commit a34c0ae9ebd6 ("[PATCH] loop: remove the bio remapping capability") in the linux-history tree. Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- drivers/block/loop.c | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 7952ed5c607b..c633b68b69ff 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1167,21 +1167,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { - struct file *file = lo->lo_backing_file; + struct file *file; struct kstat stat; - int error; + int ret; - if (lo->lo_state != Lo_bound) + if (lo->lo_state != Lo_bound) { + mutex_unlock(&lo->lo_ctl_mutex); return -ENXIO; - error = vfs_getattr(&file->f_path, &stat, - STATX_INO, AT_STATX_SYNC_AS_STAT); - if (error) - return error; + } + memset(info, 0, sizeof(*info)); info->lo_number = lo->lo_number; - info->lo_device = huge_encode_dev(stat.dev); - info->lo_inode = stat.ino; - info->lo_rdevice = huge_encode_dev(lo->lo_device ? 
stat.rdev : stat.dev); info->lo_offset = lo->lo_offset; info->lo_sizelimit = lo->lo_sizelimit; info->lo_flags = lo->lo_flags; @@ -1194,7 +1190,19 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, lo->lo_encrypt_key_size); } - return 0; + + /* Drop lo_ctl_mutex while we call into the filesystem. */ + file = get_file(lo->lo_backing_file); + mutex_unlock(&lo->lo_ctl_mutex); + ret = vfs_getattr(&file->f_path, &stat, STATX_INO, + AT_STATX_SYNC_AS_STAT); + if (!ret) { + info->lo_device = huge_encode_dev(stat.dev); + info->lo_inode = stat.ino; + info->lo_rdevice = huge_encode_dev(stat.rdev); + } + fput(file); + return ret; } static void @@ -1374,7 +1382,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, break; case LOOP_GET_STATUS: err = loop_get_status_old(lo, (struct loop_info __user *) arg); - break; + /* loop_get_status() unlocks lo_ctl_mutex */ + goto out_unlocked; case LOOP_SET_STATUS64: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) @@ -1383,7 +1392,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, break; case LOOP_GET_STATUS64: err = loop_get_status64(lo, (struct loop_info64 __user *) arg); - break; + /* loop_get_status() unlocks lo_ctl_mutex */ + goto out_unlocked; case LOOP_SET_CAPACITY: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) @@ -1544,7 +1554,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, mutex_lock(&lo->lo_ctl_mutex); err = loop_get_status_compat( lo, (struct compat_loop_info __user *) arg); - mutex_unlock(&lo->lo_ctl_mutex); + /* loop_get_status() unlocks lo_ctl_mutex */ break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: -- cgit v1.2.3 From 3148ffbdb9162baa28545809d675d3bf9339d6a1 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Mon, 26 Mar 2018 21:39:12 -0700 Subject: loop: use killable lock in ioctls Even after the previous patch to drop lo_ctl_mutex while calling vfs_getattr(), there are other cases where we can end up sleeping for a long time while holding lo_ctl_mutex. Let's avoid the uninterruptible sleep from the ioctls. 
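The conversion pattern applied in the hunks below is mechanical: take the lock with the killable variant and propagate the error instead of sleeping uninterruptibly. A minimal sketch, with hypothetical foo_* names standing in for the loop code:

	/* Sketch of the killable-lock pattern (foo_* names are illustrative) */
	static int foo_ioctl_locked(struct foo_device *foo, unsigned int cmd)
	{
		int err;

		/* Returns -EINTR if a fatal signal arrives while waiting */
		err = mutex_lock_killable(&foo->ctl_mutex);
		if (err)
			return err;

		err = foo_handle_cmd(foo, cmd);		/* assumed helper */
		mutex_unlock(&foo->ctl_mutex);
		return err;
	}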
Signed-off-by: Omar Sandoval Signed-off-by: Jens Axboe --- drivers/block/loop.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c633b68b69ff..f34863af332a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1360,7 +1360,10 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, struct loop_device *lo = bdev->bd_disk->private_data; int err; - mutex_lock_nested(&lo->lo_ctl_mutex, 1); + err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1); + if (err) + goto out_unlocked; + switch (cmd) { case LOOP_SET_FD: err = loop_set_fd(lo, mode, bdev, arg); @@ -1545,16 +1548,20 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, switch(cmd) { case LOOP_SET_STATUS: - mutex_lock(&lo->lo_ctl_mutex); - err = loop_set_status_compat( - lo, (const struct compat_loop_info __user *) arg); - mutex_unlock(&lo->lo_ctl_mutex); + err = mutex_lock_killable(&lo->lo_ctl_mutex); + if (!err) { + err = loop_set_status_compat(lo, + (const struct compat_loop_info __user *)arg); + mutex_unlock(&lo->lo_ctl_mutex); + } break; case LOOP_GET_STATUS: - mutex_lock(&lo->lo_ctl_mutex); - err = loop_get_status_compat( - lo, (struct compat_loop_info __user *) arg); - /* loop_get_status() unlocks lo_ctl_mutex */ + err = mutex_lock_killable(&lo->lo_ctl_mutex); + if (!err) { + err = loop_get_status_compat(lo, + (struct compat_loop_info __user *)arg); + /* loop_get_status() unlocks lo_ctl_mutex */ + } break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: @@ -1959,7 +1966,9 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, ret = loop_lookup(&lo, parm); if (ret < 0) break; - mutex_lock(&lo->lo_ctl_mutex); + ret = mutex_lock_killable(&lo->lo_ctl_mutex); + if (ret) + break; if (lo->lo_state != Lo_unbound) { ret = -EBUSY; mutex_unlock(&lo->lo_ctl_mutex); -- cgit v1.2.3 From f23f5bece686a76598335141a091934f7eb0998c Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 27 Mar 2018 09:39:06 -0600 Subject: blk-mq: Allow PCI vector offset for mapping queues The PCI interrupt vectors intended to be associated with a queue may not start at 0; a driver may allocate pre_vectors for special use. This patch adds an offset parameter so blk-mq may find the intended affinity mask and updates all drivers using this API accordingly. Cc: Don Brace Cc: Cc: Signed-off-by: Keith Busch Reviewed-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq-pci.c | 6 ++++-- drivers/nvme/host/pci.c | 2 +- drivers/scsi/qla2xxx/qla_os.c | 2 +- drivers/scsi/smartpqi/smartpqi_init.c | 2 +- include/linux/blk-mq-pci.h | 3 ++- 5 files changed, 9 insertions(+), 6 deletions(-) diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c index 76944e3271bf..e233996bb76f 100644 --- a/block/blk-mq-pci.c +++ b/block/blk-mq-pci.c @@ -21,6 +21,7 @@ * blk_mq_pci_map_queues - provide a default queue mapping for PCI device * @set: tagset to provide the mapping for * @pdev: PCI device associated with @set. + * @offset: Offset to use for the pci irq vector * * This function assumes the PCI device @pdev has at least as many available * interrupt vectors as @set has queues. It will then query the vector @@ -28,13 +29,14 @@ * that maps a queue to the CPUs that have irq affinity for the corresponding * vector. 
*/ -int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev) +int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev, + int offset) { const struct cpumask *mask; unsigned int queue, cpu; for (queue = 0; queue < set->nr_hw_queues; queue++) { - mask = pci_irq_get_affinity(pdev, queue); + mask = pci_irq_get_affinity(pdev, queue + offset); if (!mask) goto fallback; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index cef5ce851a92..e3b9efca0571 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -414,7 +414,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set) { struct nvme_dev *dev = set->driver_data; - return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev)); + return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev), 0); } /** diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 12ee6e02d146..2c705f3dd265 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -6805,7 +6805,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost) if (USER_CTRL_IRQ(vha->hw)) rc = blk_mq_map_queues(&shost->tag_set); else - rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev); + rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0); return rc; } diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index b2880c7709e6..10c94011c8a8 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -5348,7 +5348,7 @@ static int pqi_map_queues(struct Scsi_Host *shost) { struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); - return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev); + return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0); } static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h index 6338551e0fb9..9f4c17f0d2d8 100644 --- a/include/linux/blk-mq-pci.h +++ b/include/linux/blk-mq-pci.h @@ -5,6 +5,7 @@ struct blk_mq_tag_set; struct pci_dev; -int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev); +int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev, + int offset); #endif /* _LINUX_BLK_MQ_PCI_H */ -- cgit v1.2.3 From 5da84cf6037690835c7b1ea91b4158ed768df712 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 30 Mar 2018 00:04:48 +0200 Subject: lightnvm/pblk-gc: Delete an error message for a failed memory allocation in pblk_gc_line_prepare_ws() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Omit an extra message for a memory allocation failure in this function. This issue was detected by using the Coccinelle software. 
Signed-off-by: Markus Elfring Reviewed-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-gc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c index 3d899383666e..31f17d6f14ee 100644 --- a/drivers/lightnvm/pblk-gc.c +++ b/drivers/lightnvm/pblk-gc.c @@ -147,10 +147,8 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work) int ret; invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL); - if (!invalid_bitmap) { - pr_err("pblk: could not allocate GC invalid bitmap\n"); + if (!invalid_bitmap) goto fail_free_ws; - } emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type, GFP_KERNEL); -- cgit v1.2.3 From 8f37d1913f096b530242f7815ac0be3c20888ef9 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:04:49 +0200 Subject: lightnvm: remove chnl_offset in nvme_nvm_identity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The identity structure is initialized to zero at the beginning of the nvme_nvm_identity function. The chnl_offset is separately set to zero. Since neither the variable nor the assignment is ever changed, remove both. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/nvme/host/lightnvm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 50ef71ee3d86..f9c38a8d54e1 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -59,8 +59,7 @@ struct nvme_nvm_identity { __u64 rsvd[2]; __le64 prp1; __le64 prp2; - __le32 chnl_off; - __u32 rsvd11[5]; + __u32 rsvd11[6]; }; struct nvme_nvm_getbbtbl { @@ -279,7 +278,6 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) c.identity.opcode = nvme_nvm_admin_identity; c.identity.nsid = cpu_to_le32(ns->head->ns_id); - c.identity.chnl_off = 0; nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL); if (!nvme_nvm_id) -- cgit v1.2.3 From cfe1c9e2e2a34ccaf2ba01d2c435d65207335ca1 Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:04:50 +0200 Subject: lightnvm: pblk: handle bad sectors in the emeta area correctly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unless we check whether there are bad sectors in the entire emeta area, we risk ending up with a valid bitmap / available sector count inconsistency. This resulted in lines with a bad chunk at the last LUN being marked as bad, so go through the whole emeta area and mark up the invalid sectors. Signed-off-by: Hans Holmberg Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-core.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 0487b9340c1d..9027cf2ed1d8 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -1021,6 +1021,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, int nr_bb = 0; u64 off; int bit = -1; + int emeta_secs; line->sec_in_line = lm->sec_per_line; @@ -1055,18 +1056,18 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, /* Mark emeta metadata sectors as bad sectors.
We need to consider bad * blocks to make sure that there are enough sectors to store emeta */ - off = lm->sec_per_line - lm->emeta_sec[0]; - bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]); - while (nr_bb) { + emeta_secs = lm->emeta_sec[0]; + off = lm->sec_per_line; + while (emeta_secs) { off -= geo->sec_per_pl; if (!test_bit(off, line->invalid_bitmap)) { bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl); - nr_bb--; + emeta_secs -= geo->sec_per_pl; } } - line->sec_in_line -= lm->emeta_sec[0]; line->emeta_ssec = off; + line->sec_in_line -= lm->emeta_sec[0]; line->nr_valid_lbas = 0; line->left_msecs = line->sec_in_line; *line->vsc = cpu_to_le32(line->sec_in_line); -- cgit v1.2.3 From d0ab0b1ab991f48fc1fb579490df397d5f819913 Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:04:51 +0200 Subject: lightnvm: pblk: check data lines version on recovery MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As a preparation for future bumps of data line persistent storage versions, we need to start checking the emeta line version during recovery. Also split up the current emeta/smeta version into two bytes (major, minor). Recovering lines with the same major number as the current pblk data line version must succeed. This means that any changes in the persistent format must be: (1) Backward compatible: if we switch back to an older kernel, recovery of lines stored with major == current_major and minor > current_minor must succeed. (2) Forward compatible: switching to a newer kernel, recovery of lines stored with major == current_major and minor < current_minor must handle the data format differences gracefully (i.e. initialize new data structures to default values). If we detect lines that have a different major number than the current one, we must abort recovery. The user must manually migrate the data in this case. Previously the version stored in the emeta header was copied from smeta, which has version 1, so we need to set the minor version to 1.
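Rules (1) and (2) boil down to a single comparison on the major number; minor-version skew is tolerated in both directions. A compact sketch of that decision, assuming the byte-sized header fields introduced below:

	/* Sketch of the recovery decision implied by rules (1) and (2) */
	static bool emeta_line_recoverable(u8 major, u8 minor)
	{
		if (major != EMETA_VERSION_MAJOR)
			return false;	/* abort recovery; manual data migration */
		/*
		 * Same major: always recover. A newer minor only adds fields we
		 * can ignore; an older minor means new fields get default values.
		 */
		return true;
	}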
Signed-off-by: Hans Holmberg Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-core.c | 9 ++++++++- drivers/lightnvm/pblk-recovery.c | 26 ++++++++++++++++++++++++-- drivers/lightnvm/pblk.h | 16 ++++++++++++++-- 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 9027cf2ed1d8..155e42a26293 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -975,7 +975,8 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line, memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16); smeta_buf->header.id = cpu_to_le32(line->id); smeta_buf->header.type = cpu_to_le16(line->type); - smeta_buf->header.version = SMETA_VERSION; + smeta_buf->header.version_major = SMETA_VERSION_MAJOR; + smeta_buf->header.version_minor = SMETA_VERSION_MINOR; /* Start metadata */ smeta_buf->seq_nr = cpu_to_le64(line->seq_nr); @@ -998,6 +999,12 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line, /* End metadata */ memcpy(&emeta_buf->header, &smeta_buf->header, sizeof(struct line_header)); + + emeta_buf->header.version_major = EMETA_VERSION_MAJOR; + emeta_buf->header.version_minor = EMETA_VERSION_MINOR; + emeta_buf->header.crc = cpu_to_le32( + pblk_calc_meta_header_crc(pblk, &emeta_buf->header)); + emeta_buf->seq_nr = cpu_to_le64(line->seq_nr); emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line); emeta_buf->nr_valid_lbas = cpu_to_le64(0); diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index 1d5e961bf5e0..a30fe203d454 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -826,6 +826,25 @@ static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line) return emeta_start; } +static int pblk_recov_check_line_version(struct pblk *pblk, + struct line_emeta *emeta) +{ + struct line_header *header = &emeta->header; + + if (header->version_major != EMETA_VERSION_MAJOR) { + pr_err("pblk: line major version mismatch: %d, expected: %d\n", + header->version_major, EMETA_VERSION_MAJOR); + return 1; + } + +#ifdef NVM_DEBUG + if (header->version_minor > EMETA_VERSION_MINOR) + pr_info("pblk: newer line minor version found: %d\n", line_v); +#endif + + return 0; +} + struct pblk_line *pblk_recov_l2p(struct pblk *pblk) { struct pblk_line_meta *lm = &pblk->lm; @@ -873,9 +892,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC) continue; - if (smeta_buf->header.version != SMETA_VERSION) { + if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) { pr_err("pblk: found incompatible line version %u\n", - le16_to_cpu(smeta_buf->header.version)); + smeta_buf->header.version_major); return ERR_PTR(-EINVAL); } @@ -943,6 +962,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) goto next; } + if (pblk_recov_check_line_version(pblk, line->emeta->buf)) + return ERR_PTR(-EINVAL); + if (pblk_recov_l2p_from_emeta(pblk, line)) pblk_recov_l2p_from_oob(pblk, line); diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 8c357fb6538e..fae2526f80b2 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -320,14 +320,26 @@ enum { }; #define PBLK_MAGIC 0x70626c6b /*pblk*/ -#define SMETA_VERSION cpu_to_le16(1) + +/* emeta/smeta persistent storage format versions: + * Changes in major version requires offline migration. 
+ * Changes in minor version are handled automatically during + * recovery. + */ + +#define SMETA_VERSION_MAJOR (0) +#define SMETA_VERSION_MINOR (1) + +#define EMETA_VERSION_MAJOR (0) +#define EMETA_VERSION_MINOR (1) struct line_header { __le32 crc; __le32 identifier; /* pblk identifier */ __u8 uuid[16]; /* instance uuid */ __le16 type; /* line type */ - __le16 version; /* type version */ + __u8 version_major; /* version major */ + __u8 version_minor; /* version minor */ __le32 id; /* line id for current line */ }; -- cgit v1.2.3 From 76758390f83e5abc3bfc776d793480836d17120c Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:04:52 +0200 Subject: lightnvm: pblk: export write amplification counters to sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In an SSD, write amplification (WA) is defined as the average number of page writes per user page write. Write amplification negatively affects write performance and decreases the lifetime of the disk, so it's a useful metric to add to sysfs. In pblk's case, the number of writes per user sector is the sum of: (1) number of user writes (2) number of sectors written by the garbage collector (3) number of sectors padded (i.e. due to syncs) This patch adds persistent counters for 1-3 and two sysfs attributes to export these along with WA calculated with five decimals: write_amp_mileage: the accumulated write amplification stats for the lifetime of the pblk instance write_amp_trip: resettable stats to facilitate delta measurements, values reset at creation and if 0 is written to the attribute. 64-bit counters are used, as a 32-bit counter would wrap around already after about 17 TB worth of user data. It will take a long, long time before the 64-bit sector counters wrap around. The counters are stored after the bad block bitmap in the first emeta sector of each written line. There is plenty of space in the first emeta sector, so we don't need to bump the major version of the line data format.
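As a worked example of the exported figure: with user = 1000, gc = 150 and pad = 50 sectors, WA = (1000 + 150 + 50) / 1000 = 1.20000. The five decimals come from fixed-point math; a sketch mirroring the scheme used in the sysfs show path added below:

	/*
	 * Worked example of the five-decimal fixed-point WA calculation
	 * (div_u64/div_u64_rem from linux/math64.h, as in the diff below).
	 */
	static void wa_example(void)
	{
		u64 user = 1000, gc = 150, pad = 50;
		u64 wa_int = (user + gc + pad) * 100000;	/* 120000000 */
		u32 wa_frac;

		wa_int = div_u64(wa_int, user);			/* 120000 */
		wa_int = div_u64_rem(wa_int, 100000, &wa_frac);	/* 1, frac = 20000 */
		pr_info("WA: %llu.%05u\n", wa_int, wa_frac);	/* "WA: 1.20000" */
	}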
Signed-off-by: Hans Holmberg Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-cache.c | 4 ++ drivers/lightnvm/pblk-core.c | 6 +++ drivers/lightnvm/pblk-init.c | 11 +++++- drivers/lightnvm/pblk-map.c | 2 + drivers/lightnvm/pblk-rb.c | 3 ++ drivers/lightnvm/pblk-recovery.c | 25 ++++++++++++ drivers/lightnvm/pblk-sysfs.c | 85 +++++++++++++++++++++++++++++++++++++++- drivers/lightnvm/pblk.h | 42 ++++++++++++++++---- 8 files changed, 168 insertions(+), 10 deletions(-) diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c index 000fcad38136..29a23111b31c 100644 --- a/drivers/lightnvm/pblk-cache.c +++ b/drivers/lightnvm/pblk-cache.c @@ -63,6 +63,8 @@ retry: bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); } + atomic64_add(nr_entries, &pblk->user_wa); + #ifdef CONFIG_NVM_DEBUG atomic_long_add(nr_entries, &pblk->inflight_writes); atomic_long_add(nr_entries, &pblk->req_writes); @@ -117,6 +119,8 @@ retry: WARN_ONCE(gc_rq->secs_to_gc != valid_entries, "pblk: inconsistent GC write\n"); + atomic64_add(valid_entries, &pblk->gc_wa); + #ifdef CONFIG_NVM_DEBUG atomic_long_add(valid_entries, &pblk->inflight_writes); atomic_long_add(valid_entries, &pblk->recov_gc_writes); diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 155e42a26293..22e61cd4f801 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -1630,11 +1630,16 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line) struct pblk_line_meta *lm = &pblk->lm; struct pblk_emeta *emeta = line->emeta; struct line_emeta *emeta_buf = emeta->buf; + struct wa_counters *wa = emeta_to_wa(lm, emeta_buf); /* No need for exact vsc value; avoid a big line lock and take aprox. */ memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len); memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len); + wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa)); + wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa)); + wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa)); + emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas); emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf)); @@ -1837,6 +1842,7 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba, #endif /* Invalidate and discard padded entries */ if (lba == ADDR_EMPTY) { + atomic64_inc(&pblk->pad_wa); #ifdef CONFIG_NVM_DEBUG atomic_long_inc(&pblk->padded_wb); #endif diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 5b46924ac66c..0ffc17ccf1cc 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -559,8 +559,8 @@ static unsigned int calc_emeta_len(struct pblk *pblk) /* Round to sector size so that lba_list starts on its own sector */ lm->emeta_sec[1] = DIV_ROUND_UP( - sizeof(struct line_emeta) + lm->blk_bitmap_len, - geo->sec_size); + sizeof(struct line_emeta) + lm->blk_bitmap_len + + sizeof(struct wa_counters), geo->sec_size); lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size; /* Round to sector size so that vsc_list starts on its own sector */ @@ -991,6 +991,13 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, if (flags & NVM_TARGET_FACTORY) pblk_setup_uuid(pblk); + atomic64_set(&pblk->user_wa, 0); + atomic64_set(&pblk->pad_wa, 0); + atomic64_set(&pblk->gc_wa, 0); + pblk->user_rst_wa = 0; + pblk->pad_rst_wa = 0; + pblk->gc_rst_wa = 0; + #ifdef CONFIG_NVM_DEBUG atomic_long_set(&pblk->inflight_writes, 0); atomic_long_set(&pblk->padded_writes, 0); diff 
--git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c index 7445e6430c52..04e08d76ea5f 100644 --- a/drivers/lightnvm/pblk-map.c +++ b/drivers/lightnvm/pblk-map.c @@ -65,6 +65,8 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry, lba_list[paddr] = cpu_to_le64(w_ctx->lba); if (lba_list[paddr] != addr_empty) line->nr_valid_lbas++; + else + atomic64_inc(&pblk->pad_wa); } else { lba_list[paddr] = meta_list[i].lba = addr_empty; __pblk_map_invalidate(pblk, line, paddr); diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index ec8fc314646b..7044b5599cc4 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -622,6 +622,9 @@ try: } } + atomic64_add(pad, &((struct pblk *) + (container_of(rb, struct pblk, rwb)))->pad_wa); + #ifdef CONFIG_NVM_DEBUG atomic_long_add(pad, &((struct pblk *) (container_of(rb, struct pblk, rwb)))->padded_writes); diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index a30fe203d454..e75a1af2eebe 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -845,6 +845,29 @@ static int pblk_recov_check_line_version(struct pblk *pblk, return 0; } +static void pblk_recov_wa_counters(struct pblk *pblk, + struct line_emeta *emeta) +{ + struct pblk_line_meta *lm = &pblk->lm; + struct line_header *header = &emeta->header; + struct wa_counters *wa = emeta_to_wa(lm, emeta); + + /* WA counters were introduced in emeta version 0.2 */ + if (header->version_major > 0 || header->version_minor >= 2) { + u64 user = le64_to_cpu(wa->user); + u64 pad = le64_to_cpu(wa->pad); + u64 gc = le64_to_cpu(wa->gc); + + atomic64_set(&pblk->user_wa, user); + atomic64_set(&pblk->pad_wa, pad); + atomic64_set(&pblk->gc_wa, gc); + + pblk->user_rst_wa = user; + pblk->pad_rst_wa = pad; + pblk->gc_rst_wa = gc; + } +} + struct pblk_line *pblk_recov_l2p(struct pblk *pblk) { struct pblk_line_meta *lm = &pblk->lm; @@ -965,6 +988,8 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) if (pblk_recov_check_line_version(pblk, line->emeta->buf)) return ERR_PTR(-EINVAL); + pblk_recov_wa_counters(pblk, line->emeta->buf); + if (pblk_recov_l2p_from_emeta(pblk, line)) pblk_recov_l2p_from_oob(pblk, line); diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 620bab853579..beed99936c89 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -298,6 +298,48 @@ static ssize_t pblk_sysfs_get_sec_per_write(struct pblk *pblk, char *page) return snprintf(page, PAGE_SIZE, "%d\n", pblk->sec_per_write); } +static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad, + char *page) +{ + int sz; + + + sz = snprintf(page, PAGE_SIZE, + "user:%lld gc:%lld pad:%lld WA:", + user, gc, pad); + + if (!user) { + sz += snprintf(page + sz, PAGE_SIZE - sz, "NaN\n"); + } else { + u64 wa_int; + u32 wa_frac; + + wa_int = (user + gc + pad) * 100000; + wa_int = div_u64(wa_int, user); + wa_int = div_u64_rem(wa_int, 100000, &wa_frac); + + sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n", + wa_int, wa_frac); + } + + return sz; +} + +static ssize_t pblk_sysfs_get_write_amp_mileage(struct pblk *pblk, char *page) +{ + return pblk_get_write_amp(atomic64_read(&pblk->user_wa), + atomic64_read(&pblk->gc_wa), atomic64_read(&pblk->pad_wa), + page); +} + +static ssize_t pblk_sysfs_get_write_amp_trip(struct pblk *pblk, char *page) +{ + return pblk_get_write_amp( + atomic64_read(&pblk->user_wa) - pblk->user_rst_wa, + atomic64_read(&pblk->gc_wa) - pblk->gc_rst_wa, + 
atomic64_read(&pblk->pad_wa) - pblk->pad_rst_wa, page); +} + #ifdef CONFIG_NVM_DEBUG static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page) { @@ -360,6 +402,30 @@ static ssize_t pblk_sysfs_set_sec_per_write(struct pblk *pblk, return len; } +static ssize_t pblk_sysfs_set_write_amp_trip(struct pblk *pblk, + const char *page, size_t len) +{ + size_t c_len; + int reset_value; + + c_len = strcspn(page, "\n"); + if (c_len >= len) + return -EINVAL; + + if (kstrtouint(page, 0, &reset_value)) + return -EINVAL; + + if (reset_value != 0) + return -EINVAL; + + pblk->user_rst_wa = atomic64_read(&pblk->user_wa); + pblk->pad_rst_wa = atomic64_read(&pblk->pad_wa); + pblk->gc_rst_wa = atomic64_read(&pblk->gc_wa); + + return len; +} + + static struct attribute sys_write_luns = { .name = "write_luns", .mode = 0444, @@ -410,6 +476,16 @@ static struct attribute sys_max_sec_per_write = { .mode = 0644, }; +static struct attribute sys_write_amp_mileage = { + .name = "write_amp_mileage", + .mode = 0444, +}; + +static struct attribute sys_write_amp_trip = { + .name = "write_amp_trip", + .mode = 0644, +}; + #ifdef CONFIG_NVM_DEBUG static struct attribute sys_stats_debug_attr = { .name = "stats", @@ -428,6 +504,8 @@ static struct attribute *pblk_attrs[] = { &sys_stats_ppaf_attr, &sys_lines_attr, &sys_lines_info_attr, + &sys_write_amp_mileage, + &sys_write_amp_trip, #ifdef CONFIG_NVM_DEBUG &sys_stats_debug_attr, #endif @@ -457,6 +535,10 @@ static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr, return pblk_sysfs_lines_info(pblk, buf); else if (strcmp(attr->name, "max_sec_per_write") == 0) return pblk_sysfs_get_sec_per_write(pblk, buf); + else if (strcmp(attr->name, "write_amp_mileage") == 0) + return pblk_sysfs_get_write_amp_mileage(pblk, buf); + else if (strcmp(attr->name, "write_amp_trip") == 0) + return pblk_sysfs_get_write_amp_trip(pblk, buf); #ifdef CONFIG_NVM_DEBUG else if (strcmp(attr->name, "stats") == 0) return pblk_sysfs_stats_debug(pblk, buf); @@ -473,7 +555,8 @@ static ssize_t pblk_sysfs_store(struct kobject *kobj, struct attribute *attr, return pblk_sysfs_gc_force(pblk, buf, len); else if (strcmp(attr->name, "max_sec_per_write") == 0) return pblk_sysfs_set_sec_per_write(pblk, buf, len); - + else if (strcmp(attr->name, "write_amp_trip") == 0) + return pblk_sysfs_set_write_amp_trip(pblk, buf, len); return 0; } diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index fae2526f80b2..4b7d8618631f 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -331,7 +331,7 @@ enum { #define SMETA_VERSION_MINOR (1) #define EMETA_VERSION_MAJOR (0) -#define EMETA_VERSION_MINOR (1) +#define EMETA_VERSION_MINOR (2) struct line_header { __le32 crc; @@ -361,11 +361,13 @@ struct line_smeta { __le64 lun_bitmap[]; }; + /* * Metadata layout in media: * First sector: * 1. struct line_emeta * 2. bad block bitmap (u64 * window_wr_lun) + * 3. write amplification counters * Mid sectors (start at lbas_sector): * 3. 
nr_lbas (u64) forming lba list * Last sectors (start at vsc_sector): @@ -389,7 +391,15 @@ struct line_emeta { __le32 next_id; /* Line id for next line */ __le64 nr_lbas; /* Number of lbas mapped in line */ __le64 nr_valid_lbas; /* Number of valid lbas mapped in line */ - __le64 bb_bitmap[]; /* Updated bad block bitmap for line */ + __le64 bb_bitmap[]; /* Updated bad block bitmap for line */ +}; + + +/* Write amplification counters stored on media */ +struct wa_counters { + __le64 user; /* Number of user written sectors */ + __le64 gc; /* Number of sectors written by GC*/ + __le64 pad; /* Number of padded sectors */ }; struct pblk_emeta { @@ -519,10 +529,11 @@ struct pblk_line_meta { unsigned int smeta_sec; /* Sectors needed for smeta */ unsigned int emeta_len[4]; /* Lengths for emeta: - * [0]: Total length - * [1]: struct line_emeta length - * [2]: L2P portion length - * [3]: vsc list length + * [0]: Total + * [1]: struct line_emeta + + * bb_bitmap + struct wa_counters + * [2]: L2P portion + * [3]: vsc */ unsigned int emeta_sec[4]; /* Sectors needed for emeta. Same layout * as emeta_len @@ -604,8 +615,19 @@ struct pblk { int sec_per_write; unsigned char instance_uuid[16]; + + /* Persistent write amplification counters, 4kb sector I/Os */ + atomic64_t user_wa; /* Sectors written by user */ + atomic64_t gc_wa; /* Sectors written by GC */ + atomic64_t pad_wa; /* Padded sectors written */ + + /* Reset values for delta write amplification measurements */ + u64 user_rst_wa; + u64 gc_rst_wa; + u64 pad_rst_wa; + #ifdef CONFIG_NVM_DEBUG - /* All debug counters apply to 4kb sector I/Os */ + /* Non-persistent debug counters, 4kb sector I/Os */ atomic_long_t inflight_writes; /* Inflight writes (user and gc) */ atomic_long_t padded_writes; /* Sectors padded due to flush/fua */ atomic_long_t padded_wb; /* Sectors padded in write buffer */ @@ -900,6 +922,12 @@ static inline void *emeta_to_bb(struct line_emeta *emeta) return emeta->bb_bitmap; } +static inline void *emeta_to_wa(struct pblk_line_meta *lm, + struct line_emeta *emeta) +{ + return emeta->bb_bitmap + lm->blk_bitmap_len; +} + static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta) { return ((void *)emeta + pblk->lm.emeta_len[1]); -- cgit v1.2.3 From d8a39caee02bf893e23ff26cbd10173ff2ba681f Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:04:53 +0200 Subject: lightnvm: remove mlc pairs structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The known implementations of the 1.2 specification, and upcoming 2.0 implementation all expose a sequential list of pages to write. Remove the data structure, as it is no longer needed. 
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/nvme/host/lightnvm.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index f9c38a8d54e1..940c9b9a2a09 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -115,17 +115,6 @@ struct nvme_nvm_command { }; }; -#define NVME_NVM_LP_MLC_PAIRS 886 -struct nvme_nvm_lp_mlc { - __le16 num_pairs; - __u8 pairs[NVME_NVM_LP_MLC_PAIRS]; -}; - -struct nvme_nvm_lp_tbl { - __u8 id[8]; - struct nvme_nvm_lp_mlc mlc; -}; - struct nvme_nvm_id_group { __u8 mtype; __u8 fmtype; @@ -149,8 +138,7 @@ struct nvme_nvm_id_group { __le32 mpos; __le32 mccap; __le16 cpar; - __u8 reserved[10]; - struct nvme_nvm_lp_tbl lptbl; + __u8 reserved[906]; } __packed; struct nvme_nvm_addr_format { -- cgit v1.2.3 From ff12581ec702d6c4607f614107d4816c21c6be56 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:04:54 +0200 Subject: lightnvm: remove multiple groups in 1.2 data structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only one id group from the 1.2 specification is supported. Make sure that only the first group is accessible. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/nvme/host/lightnvm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 940c9b9a2a09..dc0b1335c7c6 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -166,7 +166,8 @@ struct nvme_nvm_id { __le32 dom; struct nvme_nvm_addr_format ppaf; __u8 resv[228]; - struct nvme_nvm_id_group groups[4]; + struct nvme_nvm_id_group group; + __u8 resv2[2880]; } __packed; struct nvme_nvm_bb_tbl { @@ -208,7 +209,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) if (nvme_nvm_id->cgrps != 1) return -EINVAL; - src = &nvme_nvm_id->groups[0]; + src = &nvme_nvm_id->group; grp = &nvm_id->grp; grp->mtype = src->mtype; -- cgit v1.2.3 From 5d149bfabeb889b7ee5cd6491bc6d2b5b20c4abd Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:04:55 +0200 Subject: lightnvm: pblk: add padding distribution sysfs attribute MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When pblk receives a sync, all data up to that point in the write buffer must be committed to persistent storage, and as flash memory comes with a minimal write size, there is a significant cost involved, both in terms of time for completing the sync and in terms of write amplification (sectors padded to fill up to the minimal write size). In order to get a better understanding of the costs involved for syncs, add a sysfs attribute to pblk: padding_dist, showing a normalized distribution of sectors padded. In order to facilitate measurements of specific workloads during the lifetime of the pblk instance, the distribution can be reset by writing 0 to the attribute. Do this by introducing counters for each possible padding: {0..(minimal write size - 1)} and calculating the normalized distribution when showing the attribute.
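To make the bucketing concrete: with a minimal write size of 8 sectors, pad_dist[i] counts flushes that needed i + 1 padded sectors, and the printed bucket 0 covers flushes that needed none. A hedged sketch of the normalization; the standalone names are assumptions, the real accounting is in the diff below, and the real code formats into a sysfs page buffer rather than printing:

	/* Sketch: turn the padding buckets into the printed distribution */
	static void show_padding_dist(u64 nr_flush, const u64 *pad_dist, int buckets)
	{
		u64 padded_flushes = 0;
		int i;

		for (i = 0; i < buckets; i++)	/* buckets == min_write_pgs - 1 */
			padded_flushes += pad_dist[i];

		/* bucket 0: flushes that needed no padding at all */
		pr_cont("0:%llu%% ",
			div64_u64((nr_flush - padded_flushes) * 100, nr_flush));
		for (i = 0; i < buckets; i++)
			pr_cont("%d:%llu%% ", i + 1,
				div64_u64(pad_dist[i] * 100, nr_flush));
	}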
Signed-off-by: Hans Holmberg Signed-off-by: Javier González Rearranged total_buckets statement in pblk_sysfs_get_padding_dist Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-init.c | 16 +++++++- drivers/lightnvm/pblk-rb.c | 17 +++++---- drivers/lightnvm/pblk-sysfs.c | 87 ++++++++++++++++++++++++++++++++++++++++++- drivers/lightnvm/pblk.h | 6 ++- 4 files changed, 113 insertions(+), 13 deletions(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 0ffc17ccf1cc..8416910ee8bf 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -921,6 +921,7 @@ static void pblk_free(struct pblk *pblk) { pblk_luns_free(pblk); pblk_lines_free(pblk); + kfree(pblk->pad_dist); pblk_line_meta_free(pblk); pblk_core_free(pblk); pblk_l2p_free(pblk); @@ -998,11 +999,13 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, pblk->pad_rst_wa = 0; pblk->gc_rst_wa = 0; + atomic64_set(&pblk->nr_flush, 0); + pblk->nr_flush_rst = 0; + #ifdef CONFIG_NVM_DEBUG atomic_long_set(&pblk->inflight_writes, 0); atomic_long_set(&pblk->padded_writes, 0); atomic_long_set(&pblk->padded_wb, 0); - atomic_long_set(&pblk->nr_flush, 0); atomic_long_set(&pblk->req_writes, 0); atomic_long_set(&pblk->sub_writes, 0); atomic_long_set(&pblk->sync_writes, 0); @@ -1034,10 +1037,17 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, goto fail_free_luns; } + pblk->pad_dist = kzalloc((pblk->min_write_pgs - 1) * sizeof(atomic64_t), + GFP_KERNEL); + if (!pblk->pad_dist) { + ret = -ENOMEM; + goto fail_free_line_meta; + } + ret = pblk_core_init(pblk); if (ret) { pr_err("pblk: could not initialize core\n"); - goto fail_free_line_meta; + goto fail_free_pad_dist; } ret = pblk_l2p_init(pblk); @@ -1097,6 +1107,8 @@ fail_free_l2p: pblk_l2p_free(pblk); fail_free_core: pblk_core_free(pblk); +fail_free_pad_dist: + kfree(pblk->pad_dist); fail_free_line_meta: pblk_line_meta_free(pblk); fail_free_luns: diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 7044b5599cc4..8b1434060fb3 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -437,9 +437,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries, if (bio->bi_opf & REQ_PREFLUSH) { struct pblk *pblk = container_of(rb, struct pblk, rwb); -#ifdef CONFIG_NVM_DEBUG - atomic_long_inc(&pblk->nr_flush); -#endif + atomic64_inc(&pblk->nr_flush); if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem)) *io_ret = NVM_IO_OK; } @@ -620,14 +618,17 @@ try: pr_err("pblk: could not pad page in write bio\n"); return NVM_IO_ERR; } - } - atomic64_add(pad, &((struct pblk *) - (container_of(rb, struct pblk, rwb)))->pad_wa); + if (pad < pblk->min_write_pgs) + atomic64_inc(&pblk->pad_dist[pad - 1]); + else + pr_warn("pblk: padding more than min. 
sectors\n"); + + atomic64_add(pad, &pblk->pad_wa); + } #ifdef CONFIG_NVM_DEBUG - atomic_long_add(pad, &((struct pblk *) - (container_of(rb, struct pblk, rwb)))->padded_writes); + atomic_long_add(pad, &pblk->padded_writes); #endif return NVM_IO_OK; diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index beed99936c89..c2cf6c939752 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -340,15 +340,62 @@ static ssize_t pblk_sysfs_get_write_amp_trip(struct pblk *pblk, char *page) atomic64_read(&pblk->pad_wa) - pblk->pad_rst_wa, page); } +static long long bucket_percentage(unsigned long long bucket, + unsigned long long total) +{ + int p = bucket * 100; + + p = div_u64(p, total); + + return p; +} + +static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page) +{ + int sz = 0; + unsigned long long total; + unsigned long long total_buckets = 0; + int buckets = pblk->min_write_pgs - 1; + int i; + + total = atomic64_read(&pblk->nr_flush) - pblk->nr_flush_rst; + if (!total) { + for (i = 0; i < (buckets + 1); i++) + sz += snprintf(page + sz, PAGE_SIZE - sz, + "%d:0 ", i); + sz += snprintf(page + sz, PAGE_SIZE - sz, "\n"); + + return sz; + } + + for (i = 0; i < buckets; i++) + total_buckets += atomic64_read(&pblk->pad_dist[i]); + + sz += snprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ", + bucket_percentage(total - total_buckets, total)); + + for (i = 0; i < buckets; i++) { + unsigned long long p; + + p = bucket_percentage(atomic64_read(&pblk->pad_dist[i]), + total); + sz += snprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ", + i + 1, p); + } + sz += snprintf(page + sz, PAGE_SIZE - sz, "\n"); + + return sz; +} + #ifdef CONFIG_NVM_DEBUG static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page) { return snprintf(page, PAGE_SIZE, - "%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\n", + "%lu\t%lu\t%ld\t%llu\t%ld\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\t%lu\n", atomic_long_read(&pblk->inflight_writes), atomic_long_read(&pblk->inflight_reads), atomic_long_read(&pblk->req_writes), - atomic_long_read(&pblk->nr_flush), + (u64)atomic64_read(&pblk->nr_flush), atomic_long_read(&pblk->padded_writes), atomic_long_read(&pblk->padded_wb), atomic_long_read(&pblk->sub_writes), @@ -426,6 +473,32 @@ static ssize_t pblk_sysfs_set_write_amp_trip(struct pblk *pblk, } +static ssize_t pblk_sysfs_set_padding_dist(struct pblk *pblk, + const char *page, size_t len) +{ + size_t c_len; + int reset_value; + int buckets = pblk->min_write_pgs - 1; + int i; + + c_len = strcspn(page, "\n"); + if (c_len >= len) + return -EINVAL; + + if (kstrtouint(page, 0, &reset_value)) + return -EINVAL; + + if (reset_value != 0) + return -EINVAL; + + for (i = 0; i < buckets; i++) + atomic64_set(&pblk->pad_dist[i], 0); + + pblk->nr_flush_rst = atomic64_read(&pblk->nr_flush); + + return len; +} + static struct attribute sys_write_luns = { .name = "write_luns", .mode = 0444, @@ -486,6 +559,11 @@ static struct attribute sys_write_amp_trip = { .mode = 0644, }; +static struct attribute sys_padding_dist = { + .name = "padding_dist", + .mode = 0644, +}; + #ifdef CONFIG_NVM_DEBUG static struct attribute sys_stats_debug_attr = { .name = "stats", @@ -506,6 +584,7 @@ static struct attribute *pblk_attrs[] = { &sys_lines_info_attr, &sys_write_amp_mileage, &sys_write_amp_trip, + &sys_padding_dist, #ifdef CONFIG_NVM_DEBUG &sys_stats_debug_attr, #endif @@ -539,6 +618,8 @@ static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr, return 
pblk_sysfs_get_write_amp_mileage(pblk, buf); else if (strcmp(attr->name, "write_amp_trip") == 0) return pblk_sysfs_get_write_amp_trip(pblk, buf); + else if (strcmp(attr->name, "padding_dist") == 0) + return pblk_sysfs_get_padding_dist(pblk, buf); #ifdef CONFIG_NVM_DEBUG else if (strcmp(attr->name, "stats") == 0) return pblk_sysfs_stats_debug(pblk, buf); @@ -557,6 +638,8 @@ static ssize_t pblk_sysfs_store(struct kobject *kobj, struct attribute *attr, return pblk_sysfs_set_sec_per_write(pblk, buf, len); else if (strcmp(attr->name, "write_amp_trip") == 0) return pblk_sysfs_set_write_amp_trip(pblk, buf, len); + else if (strcmp(attr->name, "padding_dist") == 0) + return pblk_sysfs_set_padding_dist(pblk, buf, len); return 0; } diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 4b7d8618631f..17e2f242f7da 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -626,12 +626,16 @@ struct pblk { u64 gc_rst_wa; u64 pad_rst_wa; + /* Counters used for calculating padding distribution */ + atomic64_t *pad_dist; /* Padding distribution buckets */ + u64 nr_flush_rst; /* Flushes reset value for pad dist.*/ + atomic64_t nr_flush; /* Number of flush/fua I/O */ + #ifdef CONFIG_NVM_DEBUG /* Non-persistent debug counters, 4kb sector I/Os */ atomic_long_t inflight_writes; /* Inflight writes (user and gc) */ atomic_long_t padded_writes; /* Sectors padded due to flush/fua */ atomic_long_t padded_wb; /* Sectors padded in write buffer */ - atomic_long_t nr_flush; /* Number of flush/fua I/O */ atomic_long_t req_writes; /* Sectors stored on write buffer */ atomic_long_t sub_writes; /* Sectors submitted from buffer */ atomic_long_t sync_writes; /* Sectors synced to media */ -- cgit v1.2.3 From 7be970b2258654ca48bdf35d532b2eeef038fe91 Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:04:56 +0200 Subject: lightnvm: pblk: delete writer kick timer before stopping thread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unless we delete the timer that wakes up the write thread before we stop the thread we risk re-starting the thread, so delete the timer first. Signed-off-by: Hans Holmberg Reviewed-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 8416910ee8bf..49c65f1dd48b 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -912,9 +912,9 @@ static void pblk_writer_stop(struct pblk *pblk) WARN(pblk_rb_sync_count(&pblk->rwb), "Stopping not fully synced write buffer\n"); + del_timer_sync(&pblk->wtimer); if (pblk->writer_ts) kthread_stop(pblk->writer_ts); - del_timer(&pblk->wtimer); } static void pblk_free(struct pblk *pblk) -- cgit v1.2.3 From b966c50b14de56cd73d40d3fa87b48bbab6e5c8a Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:04:57 +0200 Subject: lightnvm: pblk: allow allocation of new lines during shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When shutting down pblk the write buffer is flushed and if the current line can't fit the data in the write buffer we need to allocate a new line, so remove the check that prevents this. 
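A minimal sketch of why the check must go, using a toy line/buffer model rather than the pblk structures (all names here are assumptions): the flush loop itself may have to open a new line.

  #include <stddef.h>

  /* Toy model only; none of these names exist in pblk. */
  struct line { size_t used, cap; };

  static struct line pool[4];
  static int next_line;

  static struct line *replace_line(void) /* stand-in line allocator */
  {
          struct line *l = &pool[next_line++];
          l->cap = 64;
          l->used = 0;
          return l;
  }

  static void flush_on_shutdown(struct line *cur, size_t pending)
  {
          while (pending) {
                  size_t room = cur->cap - cur->used;

                  /* The buffered data may not fit the open line, so line
                   * allocation must remain legal during shutdown. */
                  if (!room) {
                          cur = replace_line();
                          room = cur->cap;
                  }
                  if (room > pending)
                          room = pending;
                  cur->used += room; /* "write" room sectors */
                  pending -= room;
          }
  }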
Signed-off-by: Hans Holmberg Reviewed-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-core.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 22e61cd4f801..8848443a0721 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -1407,13 +1407,6 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk) l_mg->data_line = new; spin_lock(&l_mg->free_lock); - if (pblk->state != PBLK_STATE_RUNNING) { - l_mg->data_line = NULL; - l_mg->data_next = NULL; - spin_unlock(&l_mg->free_lock); - goto out; - } - pblk_line_setup_metadata(new, l_mg, &pblk->lm); spin_unlock(&l_mg->free_lock); -- cgit v1.2.3 From 3c05ef115c696392d9703be3fe014100ec77864d Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:04:58 +0200 Subject: lightnvm: pblk: prevent race in pblk_rb_flush_point_set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure that we are not advancing the sync pointer while we're adding bios to the write buffer entry completion list. This race condition results in bios not completing and was identified by a hang when running xfstest generic/113. Signed-off-by: Hans Holmberg Reviewed-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-rb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 8b1434060fb3..52fdd85dbc97 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -355,10 +355,13 @@ static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio, struct pblk_rb_entry *entry; unsigned int sync, flush_point; + pblk_rb_sync_init(rb, NULL); sync = READ_ONCE(rb->sync); - if (pos == sync) + if (pos == sync) { + pblk_rb_sync_end(rb, NULL); return 0; + } #ifdef CONFIG_NVM_DEBUG atomic_inc(&rb->inflight_flush_point); @@ -367,8 +370,6 @@ static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio, flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1); entry = &rb->entries[flush_point]; - pblk_rb_sync_init(rb, NULL); - /* Protect flush points */ smp_store_release(&rb->flush_point, flush_point); -- cgit v1.2.3 From e411b33117d1967d2a5784ed32385e566a871d12 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:04:59 +0200 Subject: lightnvm: pblk: refactor bad block identification MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation for the OCSSD 2.0 spec. bad block identification, refactor the current code to generalize bad block get/set functions and structures. 
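A sketch of the generalized layout this refactor moves to — one flat bad-block log covering all LUNs, indexed LUN-major; the helper and its names are assumptions, not the pblk API:

  /* One state byte per block, grouped by LUN: the entry for (lun, line)
   * lives at log[lun * blk_per_lun + line]. */
  static inline unsigned char bb_state(const unsigned char *log,
                                       int lun, int blk_per_lun, int line)
  {
          return log[lun * blk_per_lun + line];
  }

Note that the stride must be the per-LUN block count; a later patch in this series ("lightnvm: fix bad block initialization") corrects exactly that stride.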
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-core.c | 3 - drivers/lightnvm/pblk-init.c | 209 ++++++++++++++++++++++--------------------- drivers/lightnvm/pblk.h | 6 -- 3 files changed, 109 insertions(+), 109 deletions(-) diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 8848443a0721..5c363ccde0e3 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -1025,7 +1025,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, struct nvm_geo *geo = &dev->geo; struct pblk_line_meta *lm = &pblk->lm; struct pblk_line_mgmt *l_mg = &pblk->l_mg; - int nr_bb = 0; u64 off; int bit = -1; int emeta_secs; @@ -1041,8 +1040,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux, lm->sec_per_line); line->sec_in_line -= geo->sec_per_chk; - if (bit >= lm->emeta_bb) - nr_bb++; } /* Mark smeta metadata sectors as bad sectors */ diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 49c65f1dd48b..141036bd6afa 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -365,7 +365,25 @@ static void pblk_luns_free(struct pblk *pblk) kfree(pblk->luns); } -static void pblk_free_line_bitmaps(struct pblk_line *line) +static void pblk_line_mg_free(struct pblk *pblk) +{ + struct pblk_line_mgmt *l_mg = &pblk->l_mg; + int i; + + kfree(l_mg->bb_template); + kfree(l_mg->bb_aux); + kfree(l_mg->vsc_list); + + for (i = 0; i < PBLK_DATA_LINES; i++) { + kfree(l_mg->sline_meta[i]); + pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type); + kfree(l_mg->eline_meta[i]); + } + + kfree(pblk->lines); +} + +static void pblk_line_meta_free(struct pblk_line *line) { kfree(line->blk_bitmap); kfree(line->erase_bitmap); @@ -382,40 +400,16 @@ static void pblk_lines_free(struct pblk *pblk) line = &pblk->lines[i]; pblk_line_free(pblk, line); - pblk_free_line_bitmaps(line); + pblk_line_meta_free(line); } spin_unlock(&l_mg->free_lock); } -static void pblk_line_meta_free(struct pblk *pblk) -{ - struct pblk_line_mgmt *l_mg = &pblk->l_mg; - int i; - - kfree(l_mg->bb_template); - kfree(l_mg->bb_aux); - kfree(l_mg->vsc_list); - - for (i = 0; i < PBLK_DATA_LINES; i++) { - kfree(l_mg->sline_meta[i]); - pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type); - kfree(l_mg->eline_meta[i]); - } - - kfree(pblk->lines); -} - -static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun) +static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun, + u8 *blks, int nr_blks) { - struct nvm_geo *geo = &dev->geo; struct ppa_addr ppa; - u8 *blks; - int nr_blks, ret; - - nr_blks = geo->nr_chks * geo->plane_mode; - blks = kmalloc(nr_blks, GFP_KERNEL); - if (!blks) - return -ENOMEM; + int ret; ppa.ppa = 0; ppa.g.ch = rlun->bppa.g.ch; @@ -423,58 +417,63 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun) ret = nvm_get_tgt_bb_tbl(dev, ppa, blks); if (ret) - goto out; + return ret; nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks); - if (nr_blks < 0) { - ret = nr_blks; - goto out; - } - - rlun->bb_list = blks; + if (nr_blks < 0) + return -EIO; return 0; -out: - kfree(blks); - return ret; } -static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line, - int blk_per_line) +static void *pblk_bb_get_log(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - struct pblk_lun *rlun; - int bb_cnt = 0; - 
int i; + u8 *log; + int i, nr_blks, blk_per_lun; + int ret; - for (i = 0; i < blk_per_line; i++) { - rlun = &pblk->luns[i]; - if (rlun->bb_list[line->id] == NVM_BLK_T_FREE) - continue; + blk_per_lun = geo->nr_chks * geo->plane_mode; + nr_blks = blk_per_lun * geo->all_luns; - set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap); - bb_cnt++; + log = kmalloc(nr_blks, GFP_KERNEL); + if (!log) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < geo->all_luns; i++) { + struct pblk_lun *rlun = &pblk->luns[i]; + u8 *log_pos = log + i * blk_per_lun; + + ret = pblk_bb_get_tbl(dev, rlun, log_pos, blk_per_lun); + if (ret) { + kfree(log); + return ERR_PTR(-EIO); + } } - return bb_cnt; + return log; } -static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line) +static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line, + u8 *bb_log, int blk_per_line) { - struct pblk_line_meta *lm = &pblk->lm; + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + int i, bb_cnt = 0; - line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); - if (!line->blk_bitmap) - return -ENOMEM; + for (i = 0; i < blk_per_line; i++) { + struct pblk_lun *rlun = &pblk->luns[i]; + u8 *lun_bb_log = bb_log + i * blk_per_line; - line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); - if (!line->erase_bitmap) { - kfree(line->blk_bitmap); - return -ENOMEM; + if (lun_bb_log[line->id] == NVM_BLK_T_FREE) + continue; + + set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap); + bb_cnt++; } - return 0; + return bb_cnt; } static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns) @@ -482,7 +481,7 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns) struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; struct pblk_lun *rlun; - int i, ret; + int i; /* TODO: Implement unbalanced LUN support */ if (geo->nr_luns < 0) { @@ -505,13 +504,6 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns) rlun->bppa = luns[lunid]; sema_init(&rlun->wr_sem, 1); - - ret = pblk_bb_discovery(dev, rlun); - if (ret) { - while (--i >= 0) - kfree(pblk->luns[i].bb_list); - return ret; - } } return 0; @@ -689,6 +681,26 @@ fail_free_smeta: return -ENOMEM; } +static int pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line, + void *chunk_log, long *nr_bad_blks) +{ + struct pblk_line_meta *lm = &pblk->lm; + + line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); + if (!line->blk_bitmap) + return -ENOMEM; + + line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); + if (!line->erase_bitmap) { + kfree(line->blk_bitmap); + return -ENOMEM; + } + + *nr_bad_blks = pblk_bb_line(pblk, line, chunk_log, lm->blk_per_line); + + return 0; +} + static int pblk_lines_init(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; @@ -696,8 +708,9 @@ static int pblk_lines_init(struct pblk *pblk) struct pblk_line_mgmt *l_mg = &pblk->l_mg; struct pblk_line_meta *lm = &pblk->lm; struct pblk_line *line; + void *chunk_log; unsigned int smeta_len, emeta_len; - long nr_bad_blks, nr_free_blks; + long nr_bad_blks = 0, nr_free_blks = 0; int bb_distance, max_write_ppas, mod; int i, ret; @@ -771,13 +784,12 @@ add_emeta_page: if (lm->min_blk_line > lm->blk_per_line) { pr_err("pblk: config. not supported. Min. 
LUN in line:%d\n", lm->blk_per_line); - ret = -EINVAL; - goto fail; + return -EINVAL; } ret = pblk_lines_alloc_metadata(pblk); if (ret) - goto fail; + return ret; l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL); if (!l_mg->bb_template) { @@ -821,9 +833,16 @@ add_emeta_page: goto fail_free_bb_aux; } - nr_free_blks = 0; + chunk_log = pblk_bb_get_log(pblk); + if (IS_ERR(chunk_log)) { + pr_err("pblk: could not get bad block log (%lu)\n", + PTR_ERR(chunk_log)); + ret = PTR_ERR(chunk_log); + goto fail_free_bb_aux; + } + for (i = 0; i < l_mg->nr_lines; i++) { - int blk_in_line; + int chk_in_line; line = &pblk->lines[i]; @@ -835,26 +854,20 @@ add_emeta_page: line->vsc = &l_mg->vsc_list[i]; spin_lock_init(&line->lock); - ret = pblk_alloc_line_bitmaps(pblk, line); + ret = pblk_setup_line_meta(pblk, line, chunk_log, &nr_bad_blks); if (ret) - goto fail_free_lines; + goto fail_free_chunk_log; - nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line); - if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) { - pblk_free_line_bitmaps(line); - ret = -EINVAL; - goto fail_free_lines; - } - - blk_in_line = lm->blk_per_line - nr_bad_blks; - if (blk_in_line < lm->min_blk_line) { + chk_in_line = lm->blk_per_line - nr_bad_blks; + if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line || + chk_in_line < lm->min_blk_line) { line->state = PBLK_LINESTATE_BAD; list_add_tail(&line->list, &l_mg->bad_list); continue; } - nr_free_blks += blk_in_line; - atomic_set(&line->blk_in_line, blk_in_line); + nr_free_blks += chk_in_line; + atomic_set(&line->blk_in_line, chk_in_line); l_mg->nr_free_lines++; list_add_tail(&line->list, &l_mg->free_list); @@ -862,23 +875,19 @@ add_emeta_page: pblk_set_provision(pblk, nr_free_blks); - /* Cleanup per-LUN bad block lists - managed within lines on run-time */ - for (i = 0; i < geo->all_luns; i++) - kfree(pblk->luns[i].bb_list); - + kfree(chunk_log); return 0; -fail_free_lines: + +fail_free_chunk_log: + kfree(chunk_log); while (--i >= 0) - pblk_free_line_bitmaps(&pblk->lines[i]); + pblk_line_meta_free(&pblk->lines[i]); fail_free_bb_aux: kfree(l_mg->bb_aux); fail_free_bb_template: kfree(l_mg->bb_template); fail_free_meta: - pblk_line_meta_free(pblk); -fail: - for (i = 0; i < geo->all_luns; i++) - kfree(pblk->luns[i].bb_list); + pblk_line_mg_free(pblk); return ret; } @@ -922,7 +931,7 @@ static void pblk_free(struct pblk *pblk) pblk_luns_free(pblk); pblk_lines_free(pblk); kfree(pblk->pad_dist); - pblk_line_meta_free(pblk); + pblk_line_mg_free(pblk); pblk_core_free(pblk); pblk_l2p_free(pblk); @@ -1110,7 +1119,7 @@ fail_free_core: fail_free_pad_dist: kfree(pblk->pad_dist); fail_free_line_meta: - pblk_line_meta_free(pblk); + pblk_line_mg_free(pblk); fail_free_luns: pblk_luns_free(pblk); fail: diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 17e2f242f7da..f0309d8172c0 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -201,12 +201,6 @@ struct pblk_rb { struct pblk_lun { struct ppa_addr bppa; - - u8 *bb_list; /* Bad block list for LUN. Only used on - * bring up. Bad blocks are managed - * within lines on run-time. - */ - struct semaphore wr_sem; }; -- cgit v1.2.3 From a04e0cf93aee6b5e59e84ab66253f09eb71d621b Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:05:00 +0200 Subject: lightnvm: make 1.2 data structures explicit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the 1.2 data structures explicit, so it will be easy to identify the 2.0 data structures. 
Also fix the order in which the nvme_nvm_* structures are declared, such that they follow the nvme_nvm_command order. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/nvme/host/lightnvm.c | 82 ++++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index dc0b1335c7c6..60db3f1b59da 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -51,6 +51,21 @@ struct nvme_nvm_ph_rw { __le64 resv; }; +struct nvme_nvm_erase_blk { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd[2]; + __le64 prp1; + __le64 prp2; + __le64 spba; + __le16 length; + __le16 control; + __le32 dsmgmt; + __le64 resv; +}; + struct nvme_nvm_identity { __u8 opcode; __u8 flags; @@ -89,33 +104,18 @@ struct nvme_nvm_setbbtbl { __u32 rsvd4[3]; }; -struct nvme_nvm_erase_blk { - __u8 opcode; - __u8 flags; - __u16 command_id; - __le32 nsid; - __u64 rsvd[2]; - __le64 prp1; - __le64 prp2; - __le64 spba; - __le16 length; - __le16 control; - __le32 dsmgmt; - __le64 resv; -}; - struct nvme_nvm_command { union { struct nvme_common_command common; - struct nvme_nvm_identity identity; struct nvme_nvm_ph_rw ph_rw; + struct nvme_nvm_erase_blk erase; + struct nvme_nvm_identity identity; struct nvme_nvm_getbbtbl get_bb; struct nvme_nvm_setbbtbl set_bb; - struct nvme_nvm_erase_blk erase; }; }; -struct nvme_nvm_id_group { +struct nvme_nvm_id12_grp { __u8 mtype; __u8 fmtype; __le16 res16; @@ -141,7 +141,7 @@ struct nvme_nvm_id_group { __u8 reserved[906]; } __packed; -struct nvme_nvm_addr_format { +struct nvme_nvm_id12_addrf { __u8 ch_offset; __u8 ch_len; __u8 lun_offset; __u8 lun_len; __u8 pln_offset; __u8 pln_len; __u8 blk_offset; __u8 blk_len; __u8 pg_offset; __u8 pg_len; __u8 sect_offset; __u8 sect_len; __u8 res[4]; } __packed; -struct nvme_nvm_id { +struct nvme_nvm_id12 { __u8 ver_id; __u8 vmnt; __u8 cgrps; __u8 res; __le32 cap; __le32 dom; - struct nvme_nvm_addr_format ppaf; + struct nvme_nvm_id12_addrf ppaf; __u8 resv[228]; - struct nvme_nvm_id_group group; + struct nvme_nvm_id12_grp grp; __u8 resv2[2880]; } __packed; @@ -191,25 +191,25 @@ static inline void _nvme_nvm_check_size(void) { BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); - BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); - BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); - BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16); - BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960); + BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16); + BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64); } -static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) +static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12) { - struct nvme_nvm_id_group *src; + struct nvme_nvm_id12_grp *src; struct nvm_id_group *grp; int sec_per_pg, sec_per_pl, pg_per_blk; - if (nvme_nvm_id->cgrps != 1) + if (id12->cgrps != 1) return -EINVAL; - src = &nvme_nvm_id->group; + src = &id12->grp; grp = &nvm_id->grp; grp->mtype = src->mtype; @@ -261,34 +261,34 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) { struct nvme_ns
*ns = nvmdev->q->queuedata; - struct nvme_nvm_id *nvme_nvm_id; + struct nvme_nvm_id12 *id; struct nvme_nvm_command c = {}; int ret; c.identity.opcode = nvme_nvm_admin_identity; c.identity.nsid = cpu_to_le32(ns->head->ns_id); - nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL); - if (!nvme_nvm_id) + id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL); + if (!id) return -ENOMEM; ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c, - nvme_nvm_id, sizeof(struct nvme_nvm_id)); + id, sizeof(struct nvme_nvm_id12)); if (ret) { ret = -EIO; goto out; } - nvm_id->ver_id = nvme_nvm_id->ver_id; - nvm_id->vmnt = nvme_nvm_id->vmnt; - nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); - nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); - memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, + nvm_id->ver_id = id->ver_id; + nvm_id->vmnt = id->vmnt; + nvm_id->cap = le32_to_cpu(id->cap); + nvm_id->dom = le32_to_cpu(id->dom); + memcpy(&nvm_id->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); - ret = init_grps(nvm_id, nvme_nvm_id); + ret = init_grp(nvm_id, id); out: - kfree(nvme_nvm_id); + kfree(id); return ret; } -- cgit v1.2.3 From c6ac3f35d46b3c9999838dd13e7e113674f22ffa Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:05:01 +0200 Subject: lightnvm: flatten nvm_id_group into nvm_id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are no groups in the 2.0 specification, make sure that the nvm_id structure is flattened before 2.0 data structures are added. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 25 +++++----- drivers/nvme/host/lightnvm.c | 106 +++++++++++++++++++++---------------------- include/linux/lightnvm.h | 53 +++++++++++----------- 3 files changed, 89 insertions(+), 95 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 5f1988df1593..db4a1b8f1561 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -851,33 +851,32 @@ EXPORT_SYMBOL(nvm_get_tgt_bb_tbl); static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; - struct nvm_id_group *grp = &id->grp; struct nvm_geo *geo = &dev->geo; int ret; memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); - if (grp->mtype != 0) { + if (id->mtype != 0) { pr_err("nvm: memory type not supported\n"); return -EINVAL; } /* Whole device values */ - geo->nr_chnls = grp->num_ch; - geo->nr_luns = grp->num_lun; + geo->nr_chnls = id->num_ch; + geo->nr_luns = id->num_lun; /* Generic device geometry values */ - geo->ws_min = grp->ws_min; - geo->ws_opt = grp->ws_opt; - geo->ws_seq = grp->ws_seq; - geo->ws_per_chk = grp->ws_per_chk; - geo->nr_chks = grp->num_chk; - geo->sec_size = grp->csecs; - geo->oob_size = grp->sos; - geo->mccap = grp->mccap; + geo->ws_min = id->ws_min; + geo->ws_opt = id->ws_opt; + geo->ws_seq = id->ws_seq; + geo->ws_per_chk = id->ws_per_chk; + geo->nr_chks = id->num_chk; + geo->sec_size = id->csecs; + geo->oob_size = id->sos; + geo->mccap = id->mccap; geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size; - geo->sec_per_chk = grp->clba; + geo->sec_per_chk = id->clba; geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks; geo->all_luns = geo->nr_luns * geo->nr_chnls; diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 60db3f1b59da..6412551ecc65 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -203,57 +203,55 @@ static inline void _nvme_nvm_check_size(void) static int init_grp(struct nvm_id 
*nvm_id, struct nvme_nvm_id12 *id12) { struct nvme_nvm_id12_grp *src; - struct nvm_id_group *grp; int sec_per_pg, sec_per_pl, pg_per_blk; if (id12->cgrps != 1) return -EINVAL; src = &id12->grp; - grp = &nvm_id->grp; - grp->mtype = src->mtype; - grp->fmtype = src->fmtype; + nvm_id->mtype = src->mtype; + nvm_id->fmtype = src->fmtype; - grp->num_ch = src->num_ch; - grp->num_lun = src->num_lun; + nvm_id->num_ch = src->num_ch; + nvm_id->num_lun = src->num_lun; - grp->num_chk = le16_to_cpu(src->num_chk); - grp->csecs = le16_to_cpu(src->csecs); - grp->sos = le16_to_cpu(src->sos); + nvm_id->num_chk = le16_to_cpu(src->num_chk); + nvm_id->csecs = le16_to_cpu(src->csecs); + nvm_id->sos = le16_to_cpu(src->sos); pg_per_blk = le16_to_cpu(src->num_pg); - sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs; + sec_per_pg = le16_to_cpu(src->fpg_sz) / nvm_id->csecs; sec_per_pl = sec_per_pg * src->num_pln; - grp->clba = sec_per_pl * pg_per_blk; - grp->ws_per_chk = pg_per_blk; - - grp->mpos = le32_to_cpu(src->mpos); - grp->cpar = le16_to_cpu(src->cpar); - grp->mccap = le32_to_cpu(src->mccap); - - grp->ws_opt = grp->ws_min = sec_per_pg; - grp->ws_seq = NVM_IO_SNGL_ACCESS; - - if (grp->mpos & 0x020202) { - grp->ws_seq = NVM_IO_DUAL_ACCESS; - grp->ws_opt <<= 1; - } else if (grp->mpos & 0x040404) { - grp->ws_seq = NVM_IO_QUAD_ACCESS; - grp->ws_opt <<= 2; + nvm_id->clba = sec_per_pl * pg_per_blk; + nvm_id->ws_per_chk = pg_per_blk; + + nvm_id->mpos = le32_to_cpu(src->mpos); + nvm_id->cpar = le16_to_cpu(src->cpar); + nvm_id->mccap = le32_to_cpu(src->mccap); + + nvm_id->ws_opt = nvm_id->ws_min = sec_per_pg; + nvm_id->ws_seq = NVM_IO_SNGL_ACCESS; + + if (nvm_id->mpos & 0x020202) { + nvm_id->ws_seq = NVM_IO_DUAL_ACCESS; + nvm_id->ws_opt <<= 1; + } else if (nvm_id->mpos & 0x040404) { + nvm_id->ws_seq = NVM_IO_QUAD_ACCESS; + nvm_id->ws_opt <<= 2; } - grp->trdt = le32_to_cpu(src->trdt); - grp->trdm = le32_to_cpu(src->trdm); - grp->tprt = le32_to_cpu(src->tprt); - grp->tprm = le32_to_cpu(src->tprm); - grp->tbet = le32_to_cpu(src->tbet); - grp->tbem = le32_to_cpu(src->tbem); + nvm_id->trdt = le32_to_cpu(src->trdt); + nvm_id->trdm = le32_to_cpu(src->trdm); + nvm_id->tprt = le32_to_cpu(src->tprt); + nvm_id->tprm = le32_to_cpu(src->tprm); + nvm_id->tbet = le32_to_cpu(src->tbet); + nvm_id->tbem = le32_to_cpu(src->tbem); /* 1.2 compatibility */ - grp->num_pln = src->num_pln; - grp->num_pg = le16_to_cpu(src->num_pg); - grp->fpg_sz = le16_to_cpu(src->fpg_sz); + nvm_id->num_pln = src->num_pln; + nvm_id->num_pg = le16_to_cpu(src->num_pg); + nvm_id->fpg_sz = le16_to_cpu(src->fpg_sz); return 0; } @@ -740,14 +738,12 @@ static ssize_t nvm_dev_attr_show(struct device *dev, struct nvme_ns *ns = nvme_get_ns_from_dev(dev); struct nvm_dev *ndev = ns->ndev; struct nvm_id *id; - struct nvm_id_group *grp; struct attribute *attr; if (!ndev) return 0; id = &ndev->identity; - grp = &id->grp; attr = &dattr->attr; if (strcmp(attr->name, "version") == 0) { @@ -771,41 +767,41 @@ static ssize_t nvm_dev_attr_show(struct device *dev, id->ppaf.pg_offset, id->ppaf.pg_len, id->ppaf.sect_offset, id->ppaf.sect_len); } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */ - return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype); + return scnprintf(page, PAGE_SIZE, "%u\n", id->mtype); } else if (strcmp(attr->name, "flash_media_type") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype); + return scnprintf(page, PAGE_SIZE, "%u\n", id->fmtype); } else if (strcmp(attr->name, "num_channels") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", 
grp->num_ch); + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch); } else if (strcmp(attr->name, "num_luns") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun); + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun); } else if (strcmp(attr->name, "num_planes") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln); + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pln); } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */ - return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk); + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk); } else if (strcmp(attr->name, "num_pages") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg); + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pg); } else if (strcmp(attr->name, "page_size") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz); + return scnprintf(page, PAGE_SIZE, "%u\n", id->fpg_sz); } else if (strcmp(attr->name, "hw_sector_size") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs); + return scnprintf(page, PAGE_SIZE, "%u\n", id->csecs); } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */ - return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos); + return scnprintf(page, PAGE_SIZE, "%u\n", id->sos); } else if (strcmp(attr->name, "read_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt); + return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt); } else if (strcmp(attr->name, "read_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm); + return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm); } else if (strcmp(attr->name, "prog_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt); + return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt); } else if (strcmp(attr->name, "prog_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm); + return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm); } else if (strcmp(attr->name, "erase_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet); + return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet); } else if (strcmp(attr->name, "erase_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem); + return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem); } else if (strcmp(attr->name, "multiplane_modes") == 0) { - return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos); + return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mpos); } else if (strcmp(attr->name, "media_capabilities") == 0) { - return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap); + return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mccap); } else if (strcmp(attr->name, "max_phys_secs") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", ndev->ops->max_phys_sect); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 7f4b60abdf27..94b704a8d83d 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -154,9 +154,29 @@ struct nvm_id_lp_tbl { struct nvm_id_lp_mlc mlc; }; -struct nvm_id_group { - u8 mtype; - u8 fmtype; +struct nvm_addr_format { + u8 ch_offset; + u8 ch_len; + u8 lun_offset; + u8 lun_len; + u8 pln_offset; + u8 pln_len; + u8 blk_offset; + u8 blk_len; + u8 pg_offset; + u8 pg_len; + u8 sect_offset; + u8 sect_len; +}; + +struct nvm_id { + u8 ver_id; + u8 vmnt; + u32 cap; + u32 dom; + + struct nvm_addr_format ppaf; + u8 num_ch; u8 num_lun; u16 num_chk; @@ -180,33 +200,12 @@ struct nvm_id_group { u16 cpar; /* 1.2 compatibility */ + u8 mtype; + u8 fmtype; + u8 num_pln; u16 num_pg; u16 fpg_sz; -}; - -struct nvm_addr_format { - u8 ch_offset; - u8 ch_len; - 
u8 lun_offset; - u8 lun_len; - u8 pln_offset; - u8 pln_len; - u8 blk_offset; - u8 blk_len; - u8 pg_offset; - u8 pg_len; - u8 sect_offset; - u8 sect_len; -}; - -struct nvm_id { - u8 ver_id; - u8 vmnt; - u32 cap; - u32 dom; - struct nvm_addr_format ppaf; - struct nvm_id_group grp; } __packed; struct nvm_target { -- cgit v1.2.3 From 62771fe0aa28b5d329f3e53a2e0f805f73433752 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:05:02 +0200 Subject: lightnvm: add 2.0 geometry identification MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement the geometry data structures for 2.0 and enable a drive to be identified as one, including exposing the appropriate 2.0 sysfs entries. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 8 +- drivers/nvme/host/lightnvm.c | 338 ++++++++++++++++++++++++++++++++++++------- include/linux/lightnvm.h | 11 +- 3 files changed, 299 insertions(+), 58 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index db4a1b8f1561..521f520a1bb4 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -931,11 +931,9 @@ static int nvm_init(struct nvm_dev *dev) goto err; } - pr_debug("nvm: ver:%x nvm_vendor:%x\n", - dev->identity.ver_id, dev->identity.vmnt); - - if (dev->identity.ver_id != 1) { - pr_err("nvm: device not supported by kernel."); + if (dev->identity.ver_id != 1 && dev->identity.ver_id != 2) { + pr_err("nvm: device ver_id %d not supported by kernel.\n", + dev->identity.ver_id); goto err; } diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 6412551ecc65..8b243af8a949 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -184,6 +184,58 @@ struct nvme_nvm_bb_tbl { __u8 blk[0]; }; +struct nvme_nvm_id20_addrf { + __u8 grp_len; + __u8 pu_len; + __u8 chk_len; + __u8 lba_len; + __u8 resv[4]; +}; + +struct nvme_nvm_id20 { + __u8 mjr; + __u8 mnr; + __u8 resv[6]; + + struct nvme_nvm_id20_addrf lbaf; + + __le32 mccap; + __u8 resv2[12]; + + __u8 wit; + __u8 resv3[31]; + + /* Geometry */ + __le16 num_grp; + __le16 num_pu; + __le32 num_chk; + __le32 clba; + __u8 resv4[52]; + + /* Write data requirements */ + __le32 ws_min; + __le32 ws_opt; + __le32 mw_cunits; + __le32 maxoc; + __le32 maxocpu; + __u8 resv5[44]; + + /* Performance related metrics */ + __le32 trdt; + __le32 trdm; + __le32 twrt; + __le32 twrm; + __le32 tcrst; + __le32 tcrsm; + __u8 resv6[40]; + + /* Reserved area */ + __u8 resv7[2816]; + + /* Vendor specific */ + __u8 vs[1024]; +}; + /* * Check we didn't inadvertently grow the command struct */ @@ -198,6 +250,8 @@ static inline void _nvme_nvm_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16); BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64); + BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8); + BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE); } static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12) @@ -256,6 +310,49 @@ static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12) return 0; } +static int nvme_nvm_setup_12(struct nvm_dev *nvmdev, struct nvm_id *nvm_id, + struct nvme_nvm_id12 *id) +{ + nvm_id->ver_id = id->ver_id; + nvm_id->vmnt = id->vmnt; + nvm_id->cap = le32_to_cpu(id->cap); + nvm_id->dom = le32_to_cpu(id->dom); + memcpy(&nvm_id->ppaf, &id->ppaf, + sizeof(struct nvm_addr_format)); + + return init_grp(nvm_id, id); +} + 
+static int nvme_nvm_setup_20(struct nvm_dev *nvmdev, struct nvm_id *nvm_id, + struct nvme_nvm_id20 *id) +{ + nvm_id->ver_id = id->mjr; + + nvm_id->num_ch = le16_to_cpu(id->num_grp); + nvm_id->num_lun = le16_to_cpu(id->num_pu); + nvm_id->num_chk = le32_to_cpu(id->num_chk); + nvm_id->clba = le32_to_cpu(id->clba); + + nvm_id->ws_min = le32_to_cpu(id->ws_min); + nvm_id->ws_opt = le32_to_cpu(id->ws_opt); + nvm_id->mw_cunits = le32_to_cpu(id->mw_cunits); + + nvm_id->trdt = le32_to_cpu(id->trdt); + nvm_id->trdm = le32_to_cpu(id->trdm); + nvm_id->tprt = le32_to_cpu(id->twrt); + nvm_id->tprm = le32_to_cpu(id->twrm); + nvm_id->tbet = le32_to_cpu(id->tcrst); + nvm_id->tbem = le32_to_cpu(id->tcrsm); + + /* calculated values */ + nvm_id->ws_per_chk = nvm_id->clba / nvm_id->ws_min; + + /* 1.2 compatibility */ + nvm_id->ws_seq = NVM_IO_SNGL_ACCESS; + + return 0; +} + static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) { struct nvme_ns *ns = nvmdev->q->queuedata; @@ -277,14 +374,24 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) goto out; } - nvm_id->ver_id = id->ver_id; - nvm_id->vmnt = id->vmnt; - nvm_id->cap = le32_to_cpu(id->cap); - nvm_id->dom = le32_to_cpu(id->dom); - memcpy(&nvm_id->ppaf, &id->ppaf, - sizeof(struct nvm_addr_format)); - - ret = init_grp(nvm_id, id); + /* + * The 1.2 and 2.0 specifications share the first byte in their geometry + * command to make it possible to know what version a device implements. + */ + switch (id->ver_id) { + case 1: + ret = nvme_nvm_setup_12(nvmdev, nvm_id, id); + break; + case 2: + ret = nvme_nvm_setup_20(nvmdev, nvm_id, + (struct nvme_nvm_id20 *)id); + break; + default: + dev_err(ns->ctrl->device, + "OCSSD revision not supported (%d)\n", + nvm_id->ver_id); + ret = -EINVAL; + } out: kfree(id); return ret; @@ -733,7 +840,7 @@ void nvme_nvm_unregister(struct nvme_ns *ns) } static ssize_t nvm_dev_attr_show(struct device *dev, - struct device_attribute *dattr, char *page) + struct device_attribute *dattr, char *page) { struct nvme_ns *ns = nvme_get_ns_from_dev(dev); struct nvm_dev *ndev = ns->ndev; @@ -748,10 +855,36 @@ static ssize_t nvm_dev_attr_show(struct device *dev, if (strcmp(attr->name, "version") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id); - } else if (strcmp(attr->name, "vendor_opcode") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt); } else if (strcmp(attr->name, "capabilities") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", id->cap); + } else if (strcmp(attr->name, "read_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt); + } else if (strcmp(attr->name, "read_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm); + } else { + return scnprintf(page, + PAGE_SIZE, + "Unhandled attr(%s) in `nvm_dev_attr_show`\n", + attr->name); + } +} + +static ssize_t nvm_dev_attr_show_12(struct device *dev, + struct device_attribute *dattr, char *page) +{ + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); + struct nvm_dev *ndev = ns->ndev; + struct nvm_id *id; + struct attribute *attr; + + if (!ndev) + return 0; + + id = &ndev->identity; + attr = &dattr->attr; + + if (strcmp(attr->name, "vendor_opcode") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt); } else if (strcmp(attr->name, "device_mode") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", id->dom); /* kept for compatibility */ @@ -786,10 +919,6 @@ static ssize_t nvm_dev_attr_show(struct device *dev, return scnprintf(page, PAGE_SIZE, "%u\n", id->csecs); } else if (strcmp(attr->name, 
"oob_sector_size") == 0) {/* u32 */ return scnprintf(page, PAGE_SIZE, "%u\n", id->sos); - } else if (strcmp(attr->name, "read_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt); - } else if (strcmp(attr->name, "read_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm); } else if (strcmp(attr->name, "prog_typ") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt); } else if (strcmp(attr->name, "prog_max") == 0) { @@ -808,48 +937,99 @@ static ssize_t nvm_dev_attr_show(struct device *dev, } else { return scnprintf(page, PAGE_SIZE, - "Unhandled attr(%s) in `nvm_dev_attr_show`\n", + "Unhandled attr(%s) in `nvm_dev_attr_show_12`\n", attr->name); } } -#define NVM_DEV_ATTR_RO(_name) \ +static ssize_t nvm_dev_attr_show_20(struct device *dev, + struct device_attribute *dattr, char *page) +{ + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); + struct nvm_dev *ndev = ns->ndev; + struct nvm_id *id; + struct attribute *attr; + + if (!ndev) + return 0; + + id = &ndev->identity; + attr = &dattr->attr; + + if (strcmp(attr->name, "groups") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch); + } else if (strcmp(attr->name, "punits") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun); + } else if (strcmp(attr->name, "chunks") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk); + } else if (strcmp(attr->name, "clba") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->clba); + } else if (strcmp(attr->name, "ws_min") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->ws_min); + } else if (strcmp(attr->name, "ws_opt") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->ws_opt); + } else if (strcmp(attr->name, "mw_cunits") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->mw_cunits); + } else if (strcmp(attr->name, "write_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt); + } else if (strcmp(attr->name, "write_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm); + } else if (strcmp(attr->name, "reset_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet); + } else if (strcmp(attr->name, "reset_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem); + } else { + return scnprintf(page, + PAGE_SIZE, + "Unhandled attr(%s) in `nvm_dev_attr_show_20`\n", + attr->name); + } +} + +#define NVM_DEV_ATTR_RO(_name) \ DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL) +#define NVM_DEV_ATTR_12_RO(_name) \ + DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL) +#define NVM_DEV_ATTR_20_RO(_name) \ + DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL) +/* general attributes */ static NVM_DEV_ATTR_RO(version); -static NVM_DEV_ATTR_RO(vendor_opcode); static NVM_DEV_ATTR_RO(capabilities); -static NVM_DEV_ATTR_RO(device_mode); -static NVM_DEV_ATTR_RO(ppa_format); -static NVM_DEV_ATTR_RO(media_manager); - -static NVM_DEV_ATTR_RO(media_type); -static NVM_DEV_ATTR_RO(flash_media_type); -static NVM_DEV_ATTR_RO(num_channels); -static NVM_DEV_ATTR_RO(num_luns); -static NVM_DEV_ATTR_RO(num_planes); -static NVM_DEV_ATTR_RO(num_blocks); -static NVM_DEV_ATTR_RO(num_pages); -static NVM_DEV_ATTR_RO(page_size); -static NVM_DEV_ATTR_RO(hw_sector_size); -static NVM_DEV_ATTR_RO(oob_sector_size); + static NVM_DEV_ATTR_RO(read_typ); static NVM_DEV_ATTR_RO(read_max); -static NVM_DEV_ATTR_RO(prog_typ); -static NVM_DEV_ATTR_RO(prog_max); -static NVM_DEV_ATTR_RO(erase_typ); -static NVM_DEV_ATTR_RO(erase_max); -static NVM_DEV_ATTR_RO(multiplane_modes); -static 
NVM_DEV_ATTR_RO(media_capabilities); -static NVM_DEV_ATTR_RO(max_phys_secs); - -static struct attribute *nvm_dev_attrs[] = { + +/* 1.2 values */ +static NVM_DEV_ATTR_12_RO(vendor_opcode); +static NVM_DEV_ATTR_12_RO(device_mode); +static NVM_DEV_ATTR_12_RO(ppa_format); +static NVM_DEV_ATTR_12_RO(media_manager); +static NVM_DEV_ATTR_12_RO(media_type); +static NVM_DEV_ATTR_12_RO(flash_media_type); +static NVM_DEV_ATTR_12_RO(num_channels); +static NVM_DEV_ATTR_12_RO(num_luns); +static NVM_DEV_ATTR_12_RO(num_planes); +static NVM_DEV_ATTR_12_RO(num_blocks); +static NVM_DEV_ATTR_12_RO(num_pages); +static NVM_DEV_ATTR_12_RO(page_size); +static NVM_DEV_ATTR_12_RO(hw_sector_size); +static NVM_DEV_ATTR_12_RO(oob_sector_size); +static NVM_DEV_ATTR_12_RO(prog_typ); +static NVM_DEV_ATTR_12_RO(prog_max); +static NVM_DEV_ATTR_12_RO(erase_typ); +static NVM_DEV_ATTR_12_RO(erase_max); +static NVM_DEV_ATTR_12_RO(multiplane_modes); +static NVM_DEV_ATTR_12_RO(media_capabilities); +static NVM_DEV_ATTR_12_RO(max_phys_secs); + +static struct attribute *nvm_dev_attrs_12[] = { &dev_attr_version.attr, - &dev_attr_vendor_opcode.attr, &dev_attr_capabilities.attr, + + &dev_attr_vendor_opcode.attr, &dev_attr_device_mode.attr, &dev_attr_media_manager.attr, - &dev_attr_ppa_format.attr, &dev_attr_media_type.attr, &dev_attr_flash_media_type.attr, @@ -870,22 +1050,82 @@ static struct attribute *nvm_dev_attrs[] = { &dev_attr_multiplane_modes.attr, &dev_attr_media_capabilities.attr, &dev_attr_max_phys_secs.attr, + NULL, }; -static const struct attribute_group nvm_dev_attr_group = { +static const struct attribute_group nvm_dev_attr_group_12 = { .name = "lightnvm", - .attrs = nvm_dev_attrs, + .attrs = nvm_dev_attrs_12, +}; + +/* 2.0 values */ +static NVM_DEV_ATTR_20_RO(groups); +static NVM_DEV_ATTR_20_RO(punits); +static NVM_DEV_ATTR_20_RO(chunks); +static NVM_DEV_ATTR_20_RO(clba); +static NVM_DEV_ATTR_20_RO(ws_min); +static NVM_DEV_ATTR_20_RO(ws_opt); +static NVM_DEV_ATTR_20_RO(mw_cunits); +static NVM_DEV_ATTR_20_RO(write_typ); +static NVM_DEV_ATTR_20_RO(write_max); +static NVM_DEV_ATTR_20_RO(reset_typ); +static NVM_DEV_ATTR_20_RO(reset_max); + +static struct attribute *nvm_dev_attrs_20[] = { + &dev_attr_version.attr, + &dev_attr_capabilities.attr, + + &dev_attr_groups.attr, + &dev_attr_punits.attr, + &dev_attr_chunks.attr, + &dev_attr_clba.attr, + &dev_attr_ws_min.attr, + &dev_attr_ws_opt.attr, + &dev_attr_mw_cunits.attr, + + &dev_attr_read_typ.attr, + &dev_attr_read_max.attr, + &dev_attr_write_typ.attr, + &dev_attr_write_max.attr, + &dev_attr_reset_typ.attr, + &dev_attr_reset_max.attr, + + NULL, +}; + +static const struct attribute_group nvm_dev_attr_group_20 = { + .name = "lightnvm", + .attrs = nvm_dev_attrs_20, }; int nvme_nvm_register_sysfs(struct nvme_ns *ns) { - return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, - &nvm_dev_attr_group); + if (!ns->ndev) + return -EINVAL; + + switch (ns->ndev->identity.ver_id) { + case 1: + return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, + &nvm_dev_attr_group_12); + case 2: + return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, + &nvm_dev_attr_group_20); + } + + return -EINVAL; } void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) { - sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, - &nvm_dev_attr_group); + switch (ns->ndev->identity.ver_id) { + case 1: + sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, + &nvm_dev_attr_group_12); + break; + case 2: + sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, + &nvm_dev_attr_group_20); + break; + } } diff --git 
a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 94b704a8d83d..b717c000b712 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -184,10 +184,9 @@ struct nvm_id { u16 csecs; u16 sos; - u16 ws_min; - u16 ws_opt; - u16 ws_seq; - u16 ws_per_chk; + u32 ws_min; + u32 ws_opt; + u32 mw_cunits; u32 trdt; u32 trdm; @@ -199,6 +198,10 @@ struct nvm_id { u32 mccap; u16 cpar; + /* calculated values */ + u16 ws_seq; + u16 ws_per_chk; + /* 1.2 compatibility */ u8 mtype; u8 fmtype; -- cgit v1.2.3 From af569398c390810fca773c903a85b71dfd870bb0 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:05:03 +0200 Subject: lightnvm: remove max_rq_size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The field is no longer used. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 1 - include/linux/lightnvm.h | 2 -- 2 files changed, 3 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 521f520a1bb4..a59ad29600c3 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -874,7 +874,6 @@ static int nvm_core_init(struct nvm_dev *dev) geo->sec_size = id->csecs; geo->oob_size = id->sos; geo->mccap = id->mccap; - geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size; geo->sec_per_chk = id->clba; geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks; diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index b717c000b712..67b4fa8e4906 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -295,8 +295,6 @@ struct nvm_geo { int ws_seq; int ws_per_chk; - int max_rq_size; - int op; struct nvm_addr_format ppaf; -- cgit v1.2.3 From 89a09c5643e01f5e5d3c5f2e720053473a60a90b Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:05:04 +0200 Subject: lightnvm: remove nvm_dev_ops->max_phys_sect MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The value of max_phys_sect is always static. Instead of defining it in the nvm_dev_ops structure, declare it as a global value. 
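A sketch of the resulting pattern — clamping against the fixed bound rather than a per-device field; the constant matches the diff below, while the helper itself is illustrative:

  #define NVM_MAX_VLBA 64 /* max logical blocks in a vector command */

  /* Illustrative helper: clamp the target's write size against the fixed
   * bound instead of querying a per-device max_phys_sect value. */
  static int max_write_pages(int min_write_pgs, int nr_luns)
  {
          int max_write_ppas = min_write_pgs * nr_luns;

          return max_write_ppas < NVM_MAX_VLBA ? max_write_ppas : NVM_MAX_VLBA;
  }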
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 28 +++++++--------------------- drivers/lightnvm/pblk-init.c | 9 ++++----- drivers/lightnvm/pblk-recovery.c | 8 ++------ drivers/nvme/host/lightnvm.c | 5 +---- include/linux/lightnvm.h | 5 ++--- 5 files changed, 16 insertions(+), 39 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index a59ad29600c3..9704db219866 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -407,7 +407,8 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) tdisk->private_data = targetdata; tqueue->queuedata = targetdata; - blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect); + blk_queue_max_hw_sectors(tqueue, + (dev->geo.sec_size >> 9) * NVM_MAX_VLBA); set_capacity(tdisk, tt->capacity(targetdata)); add_disk(tdisk); @@ -719,7 +720,7 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, struct nvm_rq rqd; int ret; - if (nr_ppas > dev->ops->max_phys_sect) { + if (nr_ppas > NVM_MAX_VLBA) { pr_err("nvm: unable to update all blocks atomically\n"); return -EINVAL; } @@ -740,14 +741,6 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, } EXPORT_SYMBOL(nvm_set_tgt_bb_tbl); -int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev) -{ - struct nvm_dev *dev = tgt_dev->parent; - - return dev->ops->max_phys_sect; -} -EXPORT_SYMBOL(nvm_max_phys_sects); - int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd) { struct nvm_dev *dev = tgt_dev->parent; @@ -965,17 +958,10 @@ int nvm_register(struct nvm_dev *dev) if (!dev->q || !dev->ops) return -EINVAL; - if (dev->ops->max_phys_sect > 256) { - pr_info("nvm: max sectors supported is 256.\n"); - return -EINVAL; - } - - if (dev->ops->max_phys_sect > 1) { - dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist"); - if (!dev->dma_pool) { - pr_err("nvm: could not create dma pool\n"); - return -ENOMEM; - } + dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist"); + if (!dev->dma_pool) { + pr_err("nvm: could not create dma pool\n"); + return -ENOMEM; } ret = nvm_init(dev); diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 141036bd6afa..43b835678f48 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -260,8 +260,7 @@ static int pblk_core_init(struct pblk *pblk) return -ENOMEM; /* Internal bios can be at most the sectors signaled by the device. */ - pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev), - 0); + pblk->page_bio_pool = mempool_create_page_pool(NVM_MAX_VLBA, 0); if (!pblk->page_bio_pool) goto free_global_caches; @@ -716,12 +715,12 @@ static int pblk_lines_init(struct pblk *pblk) pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE); max_write_ppas = pblk->min_write_pgs * geo->all_luns; - pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ? 
- max_write_ppas : nvm_max_phys_sects(dev); + pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA); pblk_set_sec_per_write(pblk, pblk->min_write_pgs); if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) { - pr_err("pblk: cannot support device max_phys_sect\n"); + pr_err("pblk: vector list too big(%u > %u)\n", + pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS); return -EINVAL; } diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index e75a1af2eebe..aaab9a5c17cc 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -21,17 +21,15 @@ void pblk_submit_rec(struct work_struct *work) struct pblk_rec_ctx *recovery = container_of(work, struct pblk_rec_ctx, ws_rec); struct pblk *pblk = recovery->pblk; - struct nvm_tgt_dev *dev = pblk->dev; struct nvm_rq *rqd = recovery->rqd; struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd); - int max_secs = nvm_max_phys_sects(dev); struct bio *bio; unsigned int nr_rec_secs; unsigned int pgs_read; int ret; nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status, - max_secs); + NVM_MAX_VLBA); bio = bio_alloc(GFP_KERNEL, nr_rec_secs); @@ -74,8 +72,6 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx, struct pblk_rec_ctx *recovery, u64 *comp_bits, unsigned int comp) { - struct nvm_tgt_dev *dev = pblk->dev; - int max_secs = nvm_max_phys_sects(dev); struct nvm_rq *rec_rqd; struct pblk_c_ctx *rec_ctx; int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded; @@ -86,7 +82,7 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx, /* Copy completion bitmap, but exclude the first X completed entries */ bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status, (unsigned long int *)comp_bits, - comp, max_secs); + comp, NVM_MAX_VLBA); /* Save the context for the entries that need to be re-written and * update current context with the completed entries. 
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 8b243af8a949..e38d835b15b5 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -612,8 +612,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .destroy_dma_pool = nvme_nvm_destroy_dma_pool, .dev_dma_alloc = nvme_nvm_dev_dma_alloc, .dev_dma_free = nvme_nvm_dev_dma_free, - - .max_phys_sect = 64, }; static int nvme_nvm_submit_user_cmd(struct request_queue *q, @@ -932,8 +930,7 @@ static ssize_t nvm_dev_attr_show_12(struct device *dev, } else if (strcmp(attr->name, "media_capabilities") == 0) { return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mccap); } else if (strcmp(attr->name, "max_phys_secs") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", - ndev->ops->max_phys_sect); + return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA); } else { return scnprintf(page, PAGE_SIZE, diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 67b4fa8e4906..e55b10573c99 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -73,8 +73,6 @@ struct nvm_dev_ops { nvm_destroy_dma_pool_fn *destroy_dma_pool; nvm_dev_dma_alloc_fn *dev_dma_alloc; nvm_dev_dma_free_fn *dev_dma_free; - - unsigned int max_phys_sect; }; #ifdef CONFIG_NVM @@ -228,6 +226,8 @@ struct nvm_target { #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCH 0 +#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ + struct nvm_rq; typedef void (nvm_end_io_fn)(struct nvm_rq *); @@ -436,7 +436,6 @@ extern void nvm_unregister(struct nvm_dev *); extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, int, int); -extern int nvm_max_phys_sects(struct nvm_tgt_dev *); extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); extern void nvm_end_io(struct nvm_rq *); -- cgit v1.2.3 From 96257a8a7f3183613550c41a909819e028372b61 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:05:05 +0200 Subject: nvme: lightnvm: add late setup of block size and metadata MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The nvme driver sets up the size of the nvme namespace in two steps. First, it initializes the device with standard logical block and metadata sizes, and then sets the correct logical block and metadata size. Because the OCSSD 2.0 specification relies on the namespace to expose these sizes for correct initialization, update them appropriately on the LightNVM side as well.
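The late update boils down to deriving both sizes from the now-known namespace format. A self-contained sketch of the idea, with assumed minimal types (the real hook is nvme_nvm_update_nvm_info() in the diff below):

  struct geo { unsigned int sec_size, oob_size; }; /* assumed minimal type */

  static void update_geometry(struct geo *g, unsigned int lba_shift,
                              unsigned int metadata_size)
  {
          g->sec_size = 1u << lba_shift; /* e.g. lba_shift 12 -> 4096-byte sectors */
          g->oob_size = metadata_size;   /* per-sector out-of-band bytes */
  }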
Signed-off-by: Matias Bjørling Acked-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 3 --- drivers/nvme/host/core.c | 2 ++ drivers/nvme/host/lightnvm.c | 8 ++++++++ drivers/nvme/host/nvme.h | 2 ++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 9704db219866..3eec948d1b7e 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -864,8 +864,6 @@ static int nvm_core_init(struct nvm_dev *dev) geo->ws_seq = id->ws_seq; geo->ws_per_chk = id->ws_per_chk; geo->nr_chks = id->num_chk; - geo->sec_size = id->csecs; - geo->oob_size = id->sos; geo->mccap = id->mccap; geo->sec_per_chk = id->clba; @@ -893,7 +891,6 @@ static int nvm_core_init(struct nvm_dev *dev) if (ret) goto err_fmtype; - blk_queue_logical_block_size(dev->q, geo->sec_size); return 0; err_fmtype: kfree(dev->lun_map); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9ee919422669..e7ec2fb5c59a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1448,6 +1448,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->noiob) nvme_set_chunk_size(ns); nvme_update_disk_info(disk, ns, id); + if (ns->ndev) + nvme_nvm_update_nvm_info(ns); #ifdef CONFIG_NVME_MULTIPATH if (ns->head->disk) nvme_update_disk_info(ns->head->disk, ns, id); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index e38d835b15b5..839c0b96466a 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -812,6 +812,14 @@ int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg) } } +void nvme_nvm_update_nvm_info(struct nvme_ns *ns) +{ + struct nvm_dev *ndev = ns->ndev; + + ndev->identity.csecs = ndev->geo.sec_size = 1 << ns->lba_shift; + ndev->identity.sos = ndev->geo.oob_size = ns->ms; +} + int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node) { struct request_queue *q = ns->queue; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index c393e4b56f39..aa10842a6709 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -500,12 +500,14 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM +void nvme_nvm_update_nvm_info(struct nvme_ns *ns); int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node); void nvme_nvm_unregister(struct nvme_ns *ns); int nvme_nvm_register_sysfs(struct nvme_ns *ns); void nvme_nvm_unregister_sysfs(struct nvme_ns *ns); int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg); #else +static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {}; static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node) { -- cgit v1.2.3 From a38c78d82dd38ce178c994a777751fae61ae31c8 Mon Sep 17 00:00:00 2001 From: Heiner Litz Date: Fri, 30 Mar 2018 00:05:06 +0200 Subject: lightnvm: fix bad block initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix reading of bad block device information to correctly set up the per-line blk_bitmap during LightNVM initialization. Signed-off-by: Heiner Litz Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 43b835678f48..ee936c1ff764 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ 
-460,10 +460,11 @@ static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line, struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; int i, bb_cnt = 0; + int blk_per_lun = geo->nr_chks * geo->plane_mode; for (i = 0; i < blk_per_line; i++) { struct pblk_lun *rlun = &pblk->luns[i]; - u8 *lun_bb_log = bb_log + i * blk_per_line; + u8 *lun_bb_log = bb_log + i * blk_per_lun; if (lun_bb_log[line->id] == NVM_BLK_T_FREE) continue; -- cgit v1.2.3 From 40f962d78a969e3b476451ebc82deffdee4309c2 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 30 Mar 2018 00:05:07 +0200 Subject: lightnvm: centralize permission check for lightnvm ioctl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently all functions for handling the lightnvm core ioctl commands do a check for CAP_SYS_ADMIN. Change this to fail early in nvm_ctl_ioctl(), so we don't have to duplicate the permission checks all over. Signed-off-by: Johannes Thumshirn Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 3eec948d1b7e..5b197d6bb6d9 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -1019,9 +1019,6 @@ static long nvm_ioctl_info(struct file *file, void __user *arg) struct nvm_tgt_type *tt; int tgt_iter = 0; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - info = memdup_user(arg, sizeof(struct nvm_ioctl_info)); if (IS_ERR(info)) return -EFAULT; @@ -1060,9 +1057,6 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg) struct nvm_dev *dev; int i = 0; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL); if (!devices) return -ENOMEM; @@ -1103,9 +1097,6 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg) { struct nvm_ioctl_create create; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create))) return -EFAULT; @@ -1141,9 +1132,6 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg) struct nvm_dev *dev; int ret = 0; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove))) return -EFAULT; @@ -1168,9 +1156,6 @@ static long nvm_ioctl_dev_init(struct file *file, void __user *arg) { struct nvm_ioctl_dev_init init; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init))) return -EFAULT; @@ -1187,9 +1172,6 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) { struct nvm_ioctl_dev_factory fact; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory))) return -EFAULT; @@ -1205,6 +1187,9 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) { void __user *argp = (void __user *)arg; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + switch (cmd) { case NVM_INFO: return nvm_ioctl_info(file, argp); -- cgit v1.2.3 From 9d7aa4a484872cb2b4dc81bd6f058cb8351ca9ed Mon Sep 17 00:00:00 2001 From: Heiner Litz Date: Fri, 30 Mar 2018 00:05:08 +0200 Subject: lightnvm: Avoid validation of default op value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: 38401d231de65 ("lightnvm: set target over-provision on create ioctl") Signed-off-by: Heiner Litz 
Reviewed-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 5b197d6bb6d9..c4f12b1ae8b8 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -304,11 +304,9 @@ static int __nvm_config_extended(struct nvm_dev *dev, } /* op not set falls into target's default */ - if (e->op == 0xFFFF) + if (e->op == 0xFFFF) { e->op = NVM_TARGET_DEFAULT_OP; - - if (e->op < NVM_TARGET_MIN_OP || - e->op > NVM_TARGET_MAX_OP) { + } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) { pr_err("nvm: invalid over provisioning value\n"); return -EINVAL; } -- cgit v1.2.3 From 43d47127219de1dd674b917c1835baa14c4c1768 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:09 +0200 Subject: lightnvm: pblk: refactor init/exit sequences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor init and exit sequences to eliminate dependencies among init modules and improve readability. Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-init.c | 405 +++++++++++++++++++++---------------------- 1 file changed, 202 insertions(+), 203 deletions(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index ee936c1ff764..8f1d622801df 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -103,7 +103,40 @@ static void pblk_l2p_free(struct pblk *pblk) vfree(pblk->trans_map); } -static int pblk_l2p_init(struct pblk *pblk) +static int pblk_l2p_recover(struct pblk *pblk, bool factory_init) +{ + struct pblk_line *line = NULL; + + if (factory_init) { + pblk_setup_uuid(pblk); + } else { + line = pblk_recov_l2p(pblk); + if (IS_ERR(line)) { + pr_err("pblk: could not recover l2p table\n"); + return -EFAULT; + } + } + +#ifdef CONFIG_NVM_DEBUG + pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk)); +#endif + + /* Free full lines directly as GC has not been started yet */ + pblk_gc_free_full_lines(pblk); + + if (!line) { + /* Configure next line for user data */ + line = pblk_line_get_first_data(pblk); + if (!line) { + pr_err("pblk: line list corrupted\n"); + return -EFAULT; + } + } + + return 0; +} + +static int pblk_l2p_init(struct pblk *pblk, bool factory_init) { sector_t i; struct ppa_addr ppa; @@ -119,7 +152,7 @@ static int pblk_l2p_init(struct pblk *pblk) for (i = 0; i < pblk->rl.nr_secs; i++) pblk_trans_map_set(pblk, i, ppa); - return 0; + return pblk_l2p_recover(pblk, factory_init); } static void pblk_rwb_free(struct pblk *pblk) @@ -159,7 +192,13 @@ static int pblk_set_ppaf(struct pblk *pblk) struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; struct nvm_addr_format ppaf = geo->ppaf; - int power_len; + int mod, power_len; + + div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod); + if (mod) { + pr_err("pblk: bad configuration of sectors/pages\n"); + return -EINVAL; + } /* Re-calculate channel and lun format to adapt to configuration */ power_len = get_count_order(geo->nr_chnls); @@ -252,13 +291,40 @@ static int pblk_core_init(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; + int max_write_ppas; + + atomic64_set(&pblk->user_wa, 0); + atomic64_set(&pblk->pad_wa, 0); + atomic64_set(&pblk->gc_wa, 0); + pblk->user_rst_wa = 0; + pblk->pad_rst_wa = 0; + pblk->gc_rst_wa = 0; + + 
atomic64_set(&pblk->nr_flush, 0); + pblk->nr_flush_rst = 0; pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg * geo->nr_planes * geo->all_luns; - if (pblk_init_global_caches(pblk)) + pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE); + max_write_ppas = pblk->min_write_pgs * geo->all_luns; + pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA); + pblk_set_sec_per_write(pblk, pblk->min_write_pgs); + + if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) { + pr_err("pblk: vector list too big(%u > %u)\n", + pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS); + return -EINVAL; + } + + pblk->pad_dist = kzalloc((pblk->min_write_pgs - 1) * sizeof(atomic64_t), + GFP_KERNEL); + if (!pblk->pad_dist) return -ENOMEM; + if (pblk_init_global_caches(pblk)) + goto fail_free_pad_dist; + /* Internal bios can be at most the sectors signaled by the device. */ pblk->page_bio_pool = mempool_create_page_pool(NVM_MAX_VLBA, 0); if (!pblk->page_bio_pool) @@ -307,10 +373,8 @@ static int pblk_core_init(struct pblk *pblk) if (pblk_set_ppaf(pblk)) goto free_r_end_wq; - if (pblk_rwb_init(pblk)) - goto free_r_end_wq; - INIT_LIST_HEAD(&pblk->compl_list); + return 0; free_r_end_wq: @@ -333,6 +397,8 @@ free_page_bio_pool: mempool_destroy(pblk->page_bio_pool); free_global_caches: pblk_free_global_caches(pblk); +fail_free_pad_dist: + kfree(pblk->pad_dist); return -ENOMEM; } @@ -354,14 +420,8 @@ static void pblk_core_free(struct pblk *pblk) mempool_destroy(pblk->e_rq_pool); mempool_destroy(pblk->w_rq_pool); - pblk_rwb_free(pblk); - pblk_free_global_caches(pblk); -} - -static void pblk_luns_free(struct pblk *pblk) -{ - kfree(pblk->luns); + kfree(pblk->pad_dist); } static void pblk_line_mg_free(struct pblk *pblk) @@ -378,8 +438,6 @@ static void pblk_line_mg_free(struct pblk *pblk) pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type); kfree(l_mg->eline_meta[i]); } - - kfree(pblk->lines); } static void pblk_line_meta_free(struct pblk_line *line) @@ -402,6 +460,11 @@ static void pblk_lines_free(struct pblk *pblk) pblk_line_meta_free(line); } spin_unlock(&l_mg->free_lock); + + pblk_line_mg_free(pblk); + + kfree(pblk->luns); + kfree(pblk->lines); } static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun, @@ -476,7 +539,7 @@ static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line, return bb_cnt; } -static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns) +static int pblk_luns_init(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; @@ -501,7 +564,7 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns) int lunid = lun_raw + ch * geo->nr_luns; rlun = &pblk->luns[i]; - rlun->bppa = luns[lunid]; + rlun->bppa = dev->luns[lunid]; sema_init(&rlun->wr_sem, 1); } @@ -509,38 +572,6 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns) return 0; } -static int pblk_lines_configure(struct pblk *pblk, int flags) -{ - struct pblk_line *line = NULL; - int ret = 0; - - if (!(flags & NVM_TARGET_FACTORY)) { - line = pblk_recov_l2p(pblk); - if (IS_ERR(line)) { - pr_err("pblk: could not recover l2p table\n"); - ret = -EFAULT; - } - } - -#ifdef CONFIG_NVM_DEBUG - pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk)); -#endif - - /* Free full lines directly as GC has not been started yet */ - pblk_gc_free_full_lines(pblk); - - if (!line) { - /* Configure next line for user data */ - line = pblk_line_get_first_data(pblk); - if (!line) { - pr_err("pblk: line list corrupted\n"); - ret = -EFAULT; - } - } - - return ret; -} 
- /* See comment over struct line_emeta definition */ static unsigned int calc_emeta_len(struct pblk *pblk) { @@ -606,11 +637,70 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks) atomic_set(&pblk->rl.free_user_blocks, nr_free_blks); } -static int pblk_lines_alloc_metadata(struct pblk *pblk) +static int pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line, + void *chunk_log, long *nr_bad_blks) +{ + struct pblk_line_meta *lm = &pblk->lm; + + line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); + if (!line->blk_bitmap) + return -ENOMEM; + + line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); + if (!line->erase_bitmap) { + kfree(line->blk_bitmap); + return -ENOMEM; + } + + *nr_bad_blks = pblk_bb_line(pblk, line, chunk_log, lm->blk_per_line); + + return 0; +} + +static int pblk_line_mg_init(struct pblk *pblk) { + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; struct pblk_line_mgmt *l_mg = &pblk->l_mg; struct pblk_line_meta *lm = &pblk->lm; - int i; + int i, bb_distance; + + l_mg->nr_lines = geo->nr_chks; + l_mg->log_line = l_mg->data_line = NULL; + l_mg->l_seq_nr = l_mg->d_seq_nr = 0; + l_mg->nr_free_lines = 0; + bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES); + + INIT_LIST_HEAD(&l_mg->free_list); + INIT_LIST_HEAD(&l_mg->corrupt_list); + INIT_LIST_HEAD(&l_mg->bad_list); + INIT_LIST_HEAD(&l_mg->gc_full_list); + INIT_LIST_HEAD(&l_mg->gc_high_list); + INIT_LIST_HEAD(&l_mg->gc_mid_list); + INIT_LIST_HEAD(&l_mg->gc_low_list); + INIT_LIST_HEAD(&l_mg->gc_empty_list); + + INIT_LIST_HEAD(&l_mg->emeta_list); + + l_mg->gc_lists[0] = &l_mg->gc_high_list; + l_mg->gc_lists[1] = &l_mg->gc_mid_list; + l_mg->gc_lists[2] = &l_mg->gc_low_list; + + spin_lock_init(&l_mg->free_lock); + spin_lock_init(&l_mg->close_lock); + spin_lock_init(&l_mg->gc_lock); + + l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL); + if (!l_mg->vsc_list) + goto fail; + + l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL); + if (!l_mg->bb_template) + goto fail_free_vsc_list; + + l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL); + if (!l_mg->bb_aux) + goto fail_free_bb_template; /* smeta is always small enough to fit on a kmalloc memory allocation, * emeta depends on the number of LUNs allocated to the pblk instance @@ -656,13 +746,13 @@ static int pblk_lines_alloc_metadata(struct pblk *pblk) } } - l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL); - if (!l_mg->vsc_list) - goto fail_free_emeta; - for (i = 0; i < l_mg->nr_lines; i++) l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY); + bb_distance = (geo->all_luns) * geo->ws_opt; + for (i = 0; i < lm->sec_per_line; i += bb_distance) + bitmap_set(l_mg->bb_template, i, geo->ws_opt); + return 0; fail_free_emeta: @@ -673,69 +763,25 @@ fail_free_emeta: kfree(l_mg->eline_meta[i]->buf); kfree(l_mg->eline_meta[i]); } - fail_free_smeta: for (i = 0; i < PBLK_DATA_LINES; i++) kfree(l_mg->sline_meta[i]); - + kfree(l_mg->bb_aux); +fail_free_bb_template: + kfree(l_mg->bb_template); +fail_free_vsc_list: + kfree(l_mg->vsc_list); +fail: return -ENOMEM; } -static int pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line, - void *chunk_log, long *nr_bad_blks) -{ - struct pblk_line_meta *lm = &pblk->lm; - - line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); - if (!line->blk_bitmap) - return -ENOMEM; - - line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL); - if (!line->erase_bitmap) { - kfree(line->blk_bitmap); - return -ENOMEM; - } - - *nr_bad_blks = 
pblk_bb_line(pblk, line, chunk_log, lm->blk_per_line); - - return 0; -} - -static int pblk_lines_init(struct pblk *pblk) +static int pblk_line_meta_init(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - struct pblk_line_mgmt *l_mg = &pblk->l_mg; struct pblk_line_meta *lm = &pblk->lm; - struct pblk_line *line; - void *chunk_log; unsigned int smeta_len, emeta_len; - long nr_bad_blks = 0, nr_free_blks = 0; - int bb_distance, max_write_ppas, mod; - int i, ret; - - pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE); - max_write_ppas = pblk->min_write_pgs * geo->all_luns; - pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA); - pblk_set_sec_per_write(pblk, pblk->min_write_pgs); - - if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) { - pr_err("pblk: vector list too big(%u > %u)\n", - pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS); - return -EINVAL; - } - - div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod); - if (mod) { - pr_err("pblk: bad configuration of sectors/pages\n"); - return -EINVAL; - } - - l_mg->nr_lines = geo->nr_chks; - l_mg->log_line = l_mg->data_line = NULL; - l_mg->l_seq_nr = l_mg->d_seq_nr = 0; - l_mg->nr_free_lines = 0; - bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES); + int i; lm->sec_per_line = geo->sec_per_chk * geo->all_luns; lm->blk_per_line = geo->all_luns; @@ -787,58 +833,43 @@ add_emeta_page: return -EINVAL; } - ret = pblk_lines_alloc_metadata(pblk); + return 0; +} + +static int pblk_lines_init(struct pblk *pblk) +{ + struct pblk_line_mgmt *l_mg = &pblk->l_mg; + struct pblk_line_meta *lm = &pblk->lm; + struct pblk_line *line; + void *chunk_log; + long nr_bad_blks = 0, nr_free_blks = 0; + int i, ret; + + ret = pblk_line_meta_init(pblk); if (ret) return ret; - l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL); - if (!l_mg->bb_template) { - ret = -ENOMEM; + ret = pblk_line_mg_init(pblk); + if (ret) + return ret; + + ret = pblk_luns_init(pblk); + if (ret) goto fail_free_meta; - } - l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL); - if (!l_mg->bb_aux) { - ret = -ENOMEM; - goto fail_free_bb_template; + chunk_log = pblk_bb_get_log(pblk); + if (IS_ERR(chunk_log)) { + pr_err("pblk: could not get bad block log (%lu)\n", + PTR_ERR(chunk_log)); + ret = PTR_ERR(chunk_log); + goto fail_free_luns; } - bb_distance = (geo->all_luns) * geo->sec_per_pl; - for (i = 0; i < lm->sec_per_line; i += bb_distance) - bitmap_set(l_mg->bb_template, i, geo->sec_per_pl); - - INIT_LIST_HEAD(&l_mg->free_list); - INIT_LIST_HEAD(&l_mg->corrupt_list); - INIT_LIST_HEAD(&l_mg->bad_list); - INIT_LIST_HEAD(&l_mg->gc_full_list); - INIT_LIST_HEAD(&l_mg->gc_high_list); - INIT_LIST_HEAD(&l_mg->gc_mid_list); - INIT_LIST_HEAD(&l_mg->gc_low_list); - INIT_LIST_HEAD(&l_mg->gc_empty_list); - - INIT_LIST_HEAD(&l_mg->emeta_list); - - l_mg->gc_lists[0] = &l_mg->gc_high_list; - l_mg->gc_lists[1] = &l_mg->gc_mid_list; - l_mg->gc_lists[2] = &l_mg->gc_low_list; - - spin_lock_init(&l_mg->free_lock); - spin_lock_init(&l_mg->close_lock); - spin_lock_init(&l_mg->gc_lock); - pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line), GFP_KERNEL); if (!pblk->lines) { ret = -ENOMEM; - goto fail_free_bb_aux; - } - - chunk_log = pblk_bb_get_log(pblk); - if (IS_ERR(chunk_log)) { - pr_err("pblk: could not get bad block log (%lu)\n", - PTR_ERR(chunk_log)); - ret = PTR_ERR(chunk_log); - goto fail_free_bb_aux; + goto fail_free_chunk_log; } for (i = 0; i < l_mg->nr_lines; i++) { @@ -856,7 +887,7 @@ add_emeta_page: ret = pblk_setup_line_meta(pblk, line, 
chunk_log, &nr_bad_blks); if (ret) - goto fail_free_chunk_log; + goto fail_free_lines; chk_in_line = lm->blk_per_line - nr_bad_blks; if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line || @@ -878,14 +909,14 @@ add_emeta_page: kfree(chunk_log); return 0; -fail_free_chunk_log: - kfree(chunk_log); +fail_free_lines: while (--i >= 0) pblk_line_meta_free(&pblk->lines[i]); -fail_free_bb_aux: - kfree(l_mg->bb_aux); -fail_free_bb_template: - kfree(l_mg->bb_template); + kfree(pblk->lines); +fail_free_chunk_log: + kfree(chunk_log); +fail_free_luns: + kfree(pblk->luns); fail_free_meta: pblk_line_mg_free(pblk); @@ -928,12 +959,10 @@ static void pblk_writer_stop(struct pblk *pblk) static void pblk_free(struct pblk *pblk) { - pblk_luns_free(pblk); pblk_lines_free(pblk); - kfree(pblk->pad_dist); - pblk_line_mg_free(pblk); - pblk_core_free(pblk); pblk_l2p_free(pblk); + pblk_rwb_free(pblk); + pblk_core_free(pblk); kfree(pblk); } @@ -998,19 +1027,6 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, spin_lock_init(&pblk->trans_lock); spin_lock_init(&pblk->lock); - if (flags & NVM_TARGET_FACTORY) - pblk_setup_uuid(pblk); - - atomic64_set(&pblk->user_wa, 0); - atomic64_set(&pblk->pad_wa, 0); - atomic64_set(&pblk->gc_wa, 0); - pblk->user_rst_wa = 0; - pblk->pad_rst_wa = 0; - pblk->gc_rst_wa = 0; - - atomic64_set(&pblk->nr_flush, 0); - pblk->nr_flush_rst = 0; - #ifdef CONFIG_NVM_DEBUG atomic_long_set(&pblk->inflight_writes, 0); atomic_long_set(&pblk->padded_writes, 0); @@ -1034,48 +1050,35 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, atomic_long_set(&pblk->write_failed, 0); atomic_long_set(&pblk->erase_failed, 0); - ret = pblk_luns_init(pblk, dev->luns); + ret = pblk_core_init(pblk); if (ret) { - pr_err("pblk: could not initialize luns\n"); + pr_err("pblk: could not initialize core\n"); goto fail; } ret = pblk_lines_init(pblk); if (ret) { pr_err("pblk: could not initialize lines\n"); - goto fail_free_luns; - } - - pblk->pad_dist = kzalloc((pblk->min_write_pgs - 1) * sizeof(atomic64_t), - GFP_KERNEL); - if (!pblk->pad_dist) { - ret = -ENOMEM; - goto fail_free_line_meta; + goto fail_free_core; } - ret = pblk_core_init(pblk); + ret = pblk_rwb_init(pblk); if (ret) { - pr_err("pblk: could not initialize core\n"); - goto fail_free_pad_dist; + pr_err("pblk: could not initialize write buffer\n"); + goto fail_free_lines; } - ret = pblk_l2p_init(pblk); + ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY); if (ret) { pr_err("pblk: could not initialize maps\n"); - goto fail_free_core; - } - - ret = pblk_lines_configure(pblk, flags); - if (ret) { - pr_err("pblk: could not configure lines\n"); - goto fail_free_l2p; + goto fail_free_rwb; } ret = pblk_writer_init(pblk); if (ret) { if (ret != -EINTR) pr_err("pblk: could not initialize write thread\n"); - goto fail_free_lines; + goto fail_free_l2p; } ret = pblk_gc_init(pblk); @@ -1110,18 +1113,14 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, fail_stop_writer: pblk_writer_stop(pblk); -fail_free_lines: - pblk_lines_free(pblk); fail_free_l2p: pblk_l2p_free(pblk); +fail_free_rwb: + pblk_rwb_free(pblk); +fail_free_lines: + pblk_lines_free(pblk); fail_free_core: pblk_core_free(pblk); -fail_free_pad_dist: - kfree(pblk->pad_dist); -fail_free_line_meta: - pblk_line_mg_free(pblk); -fail_free_luns: - pblk_luns_free(pblk); fail: kfree(pblk); return ERR_PTR(ret); -- cgit v1.2.3 From e46f4e4822bdecf9bcbc2e71b2a3ae7f37464a2d Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:10 +0200 
Subject: lightnvm: simplify geometry structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, the device geometry is stored redundantly in the nvm_id and nvm_geo structures at a device level. Moreover, when instantiating targets on a specific number of LUNs, these structures are replicated and manually modified to fit the instance channel and LUN partitioning. Instead, create a generic geometry around nvm_geo, which can be used by (i) the underlying device to describe the geometry of the whole device, and (ii) instances to describe their geometry independently. Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 70 +++----- drivers/lightnvm/pblk-core.c | 16 +- drivers/lightnvm/pblk-gc.c | 2 +- drivers/lightnvm/pblk-init.c | 117 +++++++------- drivers/lightnvm/pblk-read.c | 2 +- drivers/lightnvm/pblk-recovery.c | 14 +- drivers/lightnvm/pblk-rl.c | 2 +- drivers/lightnvm/pblk-sysfs.c | 35 ++-- drivers/lightnvm/pblk-write.c | 2 +- drivers/lightnvm/pblk.h | 83 ++++------ drivers/nvme/host/lightnvm.c | 337 +++++++++++++++++++++++---------------- include/linux/lightnvm.h | 196 +++++++++++------------ 12 files changed, 451 insertions(+), 425 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index c4f12b1ae8b8..9dec936ac1dc 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -155,7 +155,7 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev, int blun = lun_begin % dev->geo.nr_luns; int lunid = 0; int lun_balanced = 1; - int prev_nr_luns; + int sec_per_lun, prev_nr_luns; int i, j; nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1; @@ -215,18 +215,23 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev, if (!tgt_dev) goto err_ch; + /* Inherit device geometry from parent */ memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo)); + /* Target device only owns a portion of the physical device */ tgt_dev->geo.nr_chnls = nr_chnls; - tgt_dev->geo.all_luns = nr_luns; tgt_dev->geo.nr_luns = (lun_balanced) ? 
prev_nr_luns : -1; + tgt_dev->geo.all_luns = nr_luns; + tgt_dev->geo.all_chunks = nr_luns * dev->geo.nr_chks; + tgt_dev->geo.op = op; - tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun; + + sec_per_lun = dev->geo.clba * dev->geo.nr_chks; + tgt_dev->geo.total_secs = nr_luns * sec_per_lun; + tgt_dev->q = dev->q; tgt_dev->map = dev_map; tgt_dev->luns = luns; - memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id)); - tgt_dev->parent = dev; return tgt_dev; @@ -296,8 +301,6 @@ static int __nvm_config_simple(struct nvm_dev *dev, static int __nvm_config_extended(struct nvm_dev *dev, struct nvm_ioctl_create_extended *e) { - struct nvm_geo *geo = &dev->geo; - if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) { e->lun_begin = 0; e->lun_end = dev->geo.all_luns - 1; @@ -311,7 +314,7 @@ static int __nvm_config_extended(struct nvm_dev *dev, return -EINVAL; } - return nvm_config_check_luns(geo, e->lun_begin, e->lun_end); + return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end); } static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) @@ -406,7 +409,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) tqueue->queuedata = targetdata; blk_queue_max_hw_sectors(tqueue, - (dev->geo.sec_size >> 9) * NVM_MAX_VLBA); + (dev->geo.csecs >> 9) * NVM_MAX_VLBA); set_capacity(tdisk, tt->capacity(targetdata)); add_disk(tdisk); @@ -841,40 +844,9 @@ EXPORT_SYMBOL(nvm_get_tgt_bb_tbl); static int nvm_core_init(struct nvm_dev *dev) { - struct nvm_id *id = &dev->identity; struct nvm_geo *geo = &dev->geo; int ret; - memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); - - if (id->mtype != 0) { - pr_err("nvm: memory type not supported\n"); - return -EINVAL; - } - - /* Whole device values */ - geo->nr_chnls = id->num_ch; - geo->nr_luns = id->num_lun; - - /* Generic device geometry values */ - geo->ws_min = id->ws_min; - geo->ws_opt = id->ws_opt; - geo->ws_seq = id->ws_seq; - geo->ws_per_chk = id->ws_per_chk; - geo->nr_chks = id->num_chk; - geo->mccap = id->mccap; - - geo->sec_per_chk = id->clba; - geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks; - geo->all_luns = geo->nr_luns * geo->nr_chnls; - - /* 1.2 spec device geometry values */ - geo->plane_mode = 1 << geo->ws_seq; - geo->nr_planes = geo->ws_opt / geo->ws_min; - geo->sec_per_pg = geo->ws_min; - geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes; - - dev->total_secs = geo->all_luns * geo->sec_per_lun; dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns), sizeof(unsigned long), GFP_KERNEL); if (!dev->lun_map) @@ -913,16 +885,14 @@ static int nvm_init(struct nvm_dev *dev) struct nvm_geo *geo = &dev->geo; int ret = -EINVAL; - if (dev->ops->identity(dev, &dev->identity)) { + if (dev->ops->identity(dev)) { pr_err("nvm: device could not be identified\n"); goto err; } - if (dev->identity.ver_id != 1 && dev->identity.ver_id != 2) { - pr_err("nvm: device ver_id %d not supported by kernel.\n", - dev->identity.ver_id); - goto err; - } + pr_debug("nvm: ver:%u nvm_vendor:%x\n", + geo->ver_id, + geo->vmnt); ret = nvm_core_init(dev); if (ret) { @@ -930,10 +900,10 @@ static int nvm_init(struct nvm_dev *dev) goto err; } - pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n", - dev->name, geo->sec_per_pg, geo->nr_planes, - geo->ws_per_chk, geo->nr_chks, - geo->all_luns, geo->nr_chnls); + pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n", + dev->name, geo->ws_min, geo->ws_opt, + geo->nr_chks, geo->all_luns, + geo->nr_chnls); return 0; err: pr_err("nvm: failed to initialize nvm\n"); diff --git 
a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 5c363ccde0e3..52c0c3e5ec6e 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -613,7 +613,7 @@ next_rq: memset(&rqd, 0, sizeof(struct nvm_rq)); rq_ppas = pblk_calc_secs(pblk, left_ppas, 0); - rq_len = rq_ppas * geo->sec_size; + rq_len = rq_ppas * geo->csecs; bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len, l_mg->emeta_alloc_type, GFP_KERNEL); @@ -722,7 +722,7 @@ u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line) if (bit >= lm->blk_per_line) return -1; - return bit * geo->sec_per_pl; + return bit * geo->ws_opt; } static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line, @@ -1034,17 +1034,17 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, /* Capture bad block information on line mapping bitmaps */ while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line, bit + 1)) < lm->blk_per_line) { - off = bit * geo->sec_per_pl; + off = bit * geo->ws_opt; bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off, lm->sec_per_line); bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux, lm->sec_per_line); - line->sec_in_line -= geo->sec_per_chk; + line->sec_in_line -= geo->clba; } /* Mark smeta metadata sectors as bad sectors */ bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line); - off = bit * geo->sec_per_pl; + off = bit * geo->ws_opt; bitmap_set(line->map_bitmap, off, lm->smeta_sec); line->sec_in_line -= lm->smeta_sec; line->smeta_ssec = off; @@ -1063,10 +1063,10 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, emeta_secs = lm->emeta_sec[0]; off = lm->sec_per_line; while (emeta_secs) { - off -= geo->sec_per_pl; + off -= geo->ws_opt; if (!test_bit(off, line->invalid_bitmap)) { - bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl); - emeta_secs -= geo->sec_per_pl; + bitmap_set(line->invalid_bitmap, off, geo->ws_opt); + emeta_secs -= geo->ws_opt; } } diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c index 31f17d6f14ee..7143b0f740fb 100644 --- a/drivers/lightnvm/pblk-gc.c +++ b/drivers/lightnvm/pblk-gc.c @@ -88,7 +88,7 @@ static void pblk_gc_line_ws(struct work_struct *work) up(&gc->gc_sem); - gc_rq->data = vmalloc(gc_rq->nr_secs * geo->sec_size); + gc_rq->data = vmalloc(gc_rq->nr_secs * geo->csecs); if (!gc_rq->data) { pr_err("pblk: could not GC line:%d (%d/%d)\n", line->id, *line->vsc, gc_rq->nr_secs); diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 8f1d622801df..2fca27d0a9b5 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -179,7 +179,7 @@ static int pblk_rwb_init(struct pblk *pblk) return -ENOMEM; power_size = get_count_order(nr_entries); - power_seg_sz = get_count_order(geo->sec_size); + power_seg_sz = get_count_order(geo->csecs); return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz); } @@ -187,18 +187,10 @@ static int pblk_rwb_init(struct pblk *pblk) /* Minimum pages needed within a lun */ #define ADDR_POOL_SIZE 64 -static int pblk_set_ppaf(struct pblk *pblk) +static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst) { - struct nvm_tgt_dev *dev = pblk->dev; - struct nvm_geo *geo = &dev->geo; - struct nvm_addr_format ppaf = geo->ppaf; - int mod, power_len; - - div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod); - if (mod) { - pr_err("pblk: bad configuration of sectors/pages\n"); - return -EINVAL; - } + struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf; + int 
power_len; /* Re-calculate channel and lun format to adapt to configuration */ power_len = get_count_order(geo->nr_chnls); @@ -206,34 +198,50 @@ static int pblk_set_ppaf(struct pblk *pblk) pr_err("pblk: supports only power-of-two channel config.\n"); return -EINVAL; } - ppaf.ch_len = power_len; + dst->ch_len = power_len; power_len = get_count_order(geo->nr_luns); if (1 << power_len != geo->nr_luns) { pr_err("pblk: supports only power-of-two LUN config.\n"); return -EINVAL; } - ppaf.lun_len = power_len; - - pblk->ppaf.sec_offset = 0; - pblk->ppaf.pln_offset = ppaf.sect_len; - pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len; - pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len; - pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len; - pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len; - pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1; - pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) << - pblk->ppaf.pln_offset; - pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) << - pblk->ppaf.ch_offset; - pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) << - pblk->ppaf.lun_offset; - pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) << - pblk->ppaf.pg_offset; - pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) << - pblk->ppaf.blk_offset; - - pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len; + dst->lun_len = power_len; + + dst->blk_len = src->blk_len; + dst->pg_len = src->pg_len; + dst->pln_len = src->pln_len; + dst->sect_len = src->sect_len; + + dst->sect_offset = 0; + dst->pln_offset = dst->sect_len; + dst->ch_offset = dst->pln_offset + dst->pln_len; + dst->lun_offset = dst->ch_offset + dst->ch_len; + dst->pg_offset = dst->lun_offset + dst->lun_len; + dst->blk_offset = dst->pg_offset + dst->pg_len; + + dst->sec_mask = ((1ULL << dst->sect_len) - 1) << dst->sect_offset; + dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset; + dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset; + dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset; + dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset; + dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset; + + return dst->blk_offset + src->blk_len; +} + +static int pblk_set_ppaf(struct pblk *pblk) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + int mod; + + div_u64_rem(geo->clba, pblk->min_write_pgs, &mod); + if (mod) { + pr_err("pblk: bad configuration of sectors/pages\n"); + return -EINVAL; + } + + pblk->ppaf_bitsize = pblk_set_addrf_12(geo, (void *)&pblk->ppaf); return 0; } @@ -303,10 +311,9 @@ static int pblk_core_init(struct pblk *pblk) atomic64_set(&pblk->nr_flush, 0); pblk->nr_flush_rst = 0; - pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg * - geo->nr_planes * geo->all_luns; + pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns; - pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE); + pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE); max_write_ppas = pblk->min_write_pgs * geo->all_luns; pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA); pblk_set_sec_per_write(pblk, pblk->min_write_pgs); @@ -583,18 +590,18 @@ static unsigned int calc_emeta_len(struct pblk *pblk) /* Round to sector size so that lba_list starts on its own sector */ lm->emeta_sec[1] = DIV_ROUND_UP( sizeof(struct line_emeta) + lm->blk_bitmap_len + - sizeof(struct wa_counters), geo->sec_size); - lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size; + sizeof(struct wa_counters), geo->csecs); + lm->emeta_len[1] = 
lm->emeta_sec[1] * geo->csecs; /* Round to sector size so that vsc_list starts on its own sector */ lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0]; lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64), - geo->sec_size); - lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size; + geo->csecs); + lm->emeta_len[2] = lm->emeta_sec[2] * geo->csecs; lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32), - geo->sec_size); - lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size; + geo->csecs); + lm->emeta_len[3] = lm->emeta_sec[3] * geo->csecs; lm->vsc_list_len = l_mg->nr_lines * sizeof(u32); @@ -625,13 +632,13 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks) * on user capacity consider only provisioned blocks */ pblk->rl.total_blocks = nr_free_blks; - pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk; + pblk->rl.nr_secs = nr_free_blks * geo->clba; /* Consider sectors used for metadata */ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines; - blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk); + blk_meta = DIV_ROUND_UP(sec_meta, geo->clba); - pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk; + pblk->capacity = (provisioned - blk_meta) * geo->clba; atomic_set(&pblk->rl.free_blocks, nr_free_blks); atomic_set(&pblk->rl.free_user_blocks, nr_free_blks); @@ -783,7 +790,7 @@ static int pblk_line_meta_init(struct pblk *pblk) unsigned int smeta_len, emeta_len; int i; - lm->sec_per_line = geo->sec_per_chk * geo->all_luns; + lm->sec_per_line = geo->clba * geo->all_luns; lm->blk_per_line = geo->all_luns; lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long); lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long); @@ -797,8 +804,8 @@ static int pblk_line_meta_init(struct pblk *pblk) */ i = 1; add_smeta_page: - lm->smeta_sec = i * geo->sec_per_pl; - lm->smeta_len = lm->smeta_sec * geo->sec_size; + lm->smeta_sec = i * geo->ws_opt; + lm->smeta_len = lm->smeta_sec * geo->csecs; smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len; if (smeta_len > lm->smeta_len) { @@ -811,8 +818,8 @@ add_smeta_page: */ i = 1; add_emeta_page: - lm->emeta_sec[0] = i * geo->sec_per_pl; - lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size; + lm->emeta_sec[0] = i * geo->ws_opt; + lm->emeta_len[0] = lm->emeta_sec[0] * geo->csecs; emeta_len = calc_emeta_len(pblk); if (emeta_len > lm->emeta_len[0]) { @@ -825,7 +832,7 @@ add_emeta_page: lm->min_blk_line = 1; if (geo->all_luns > 1) lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec + - lm->emeta_sec[0], geo->sec_per_chk); + lm->emeta_sec[0], geo->clba); if (lm->min_blk_line > lm->blk_per_line) { pr_err("pblk: config. not supported. Min. LUN in line:%d\n", @@ -1009,9 +1016,9 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, struct pblk *pblk; int ret; - if (dev->identity.dom & NVM_RSP_L2P) { + if (dev->geo.dom & NVM_RSP_L2P) { pr_err("pblk: host-side L2P table not supported. 
(%x)\n", - dev->identity.dom); + dev->geo.dom); return ERR_PTR(-EINVAL); } @@ -1093,7 +1100,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, blk_queue_write_cache(tqueue, true, false); - tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size; + tqueue->limits.discard_granularity = geo->clba * geo->csecs; tqueue->limits.discard_alignment = 0; blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9); blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue); diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 2f761283f43e..9eee10f69df0 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -563,7 +563,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq) if (!(gc_rq->secs_to_gc)) goto out; - data_len = (gc_rq->secs_to_gc) * geo->sec_size; + data_len = (gc_rq->secs_to_gc) * geo->csecs; bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len, PBLK_VMALLOC_META, GFP_KERNEL); if (IS_ERR(bio)) { diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index aaab9a5c17cc..26356429dc72 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -184,7 +184,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line) int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line); return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] - - nr_bb * geo->sec_per_chk; + nr_bb * geo->clba; } struct pblk_recov_alloc { @@ -232,7 +232,7 @@ next_read_rq: rq_ppas = pblk_calc_secs(pblk, left_ppas, 0); if (!rq_ppas) rq_ppas = pblk->min_write_pgs; - rq_len = rq_ppas * geo->sec_size; + rq_len = rq_ppas * geo->csecs; bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL); if (IS_ERR(bio)) @@ -351,7 +351,7 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line, if (!pad_rq) return -ENOMEM; - data = vzalloc(pblk->max_write_pgs * geo->sec_size); + data = vzalloc(pblk->max_write_pgs * geo->csecs); if (!data) { ret = -ENOMEM; goto free_rq; @@ -368,7 +368,7 @@ next_pad_rq: goto fail_free_pad; } - rq_len = rq_ppas * geo->sec_size; + rq_len = rq_ppas * geo->csecs; meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list); if (!meta_list) { @@ -509,7 +509,7 @@ next_rq: rq_ppas = pblk_calc_secs(pblk, left_ppas, 0); if (!rq_ppas) rq_ppas = pblk->min_write_pgs; - rq_len = rq_ppas * geo->sec_size; + rq_len = rq_ppas * geo->csecs; bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL); if (IS_ERR(bio)) @@ -640,7 +640,7 @@ next_rq: rq_ppas = pblk_calc_secs(pblk, left_ppas, 0); if (!rq_ppas) rq_ppas = pblk->min_write_pgs; - rq_len = rq_ppas * geo->sec_size; + rq_len = rq_ppas * geo->csecs; bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL); if (IS_ERR(bio)) @@ -745,7 +745,7 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line) ppa_list = (void *)(meta_list) + pblk_dma_meta_size; dma_ppa_list = dma_meta_list + pblk_dma_meta_size; - data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL); + data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL); if (!data) { ret = -ENOMEM; goto free_meta_list; diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c index 0d457b162f23..883a7113b19d 100644 --- a/drivers/lightnvm/pblk-rl.c +++ b/drivers/lightnvm/pblk-rl.c @@ -200,7 +200,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget) /* Consider sectors used for metadata */ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines; - blk_meta = DIV_ROUND_UP(sec_meta, 
geo->sec_per_chk); + blk_meta = DIV_ROUND_UP(sec_meta, geo->clba); rl->high = pblk->op_blks - blk_meta - lm->blk_per_line; rl->high_pw = get_count_order(rl->high); diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index c2cf6c939752..2474ef4366fa 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -113,26 +113,31 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; + struct nvm_addrf_12 *ppaf; + struct nvm_addrf_12 *geo_ppaf; ssize_t sz = 0; - sz = snprintf(page, PAGE_SIZE - sz, + ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + geo_ppaf = (struct nvm_addrf_12 *)&geo->addrf; + + sz = snprintf(page, PAGE_SIZE, "g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", - pblk->ppaf_bitsize, - pblk->ppaf.blk_offset, geo->ppaf.blk_len, - pblk->ppaf.pg_offset, geo->ppaf.pg_len, - pblk->ppaf.lun_offset, geo->ppaf.lun_len, - pblk->ppaf.ch_offset, geo->ppaf.ch_len, - pblk->ppaf.pln_offset, geo->ppaf.pln_len, - pblk->ppaf.sec_offset, geo->ppaf.sect_len); + pblk->ppaf_bitsize, + ppaf->blk_offset, ppaf->blk_len, + ppaf->pg_offset, ppaf->pg_len, + ppaf->lun_offset, ppaf->lun_len, + ppaf->ch_offset, ppaf->ch_len, + ppaf->pln_offset, ppaf->pln_len, + ppaf->sect_offset, ppaf->sect_len); sz += snprintf(page + sz, PAGE_SIZE - sz, "d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", - geo->ppaf.blk_offset, geo->ppaf.blk_len, - geo->ppaf.pg_offset, geo->ppaf.pg_len, - geo->ppaf.lun_offset, geo->ppaf.lun_len, - geo->ppaf.ch_offset, geo->ppaf.ch_len, - geo->ppaf.pln_offset, geo->ppaf.pln_len, - geo->ppaf.sect_offset, geo->ppaf.sect_len); + geo_ppaf->blk_offset, geo_ppaf->blk_len, + geo_ppaf->pg_offset, geo_ppaf->pg_len, + geo_ppaf->lun_offset, geo_ppaf->lun_len, + geo_ppaf->ch_offset, geo_ppaf->ch_len, + geo_ppaf->pln_offset, geo_ppaf->pln_len, + geo_ppaf->sect_offset, geo_ppaf->sect_len); return sz; } @@ -288,7 +293,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page) "blk_line:%d, sec_line:%d, sec_blk:%d\n", lm->blk_per_line, lm->sec_per_line, - geo->sec_per_chk); + geo->clba); return sz; } diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c index aae86ed60b98..3e6f1ebd743a 100644 --- a/drivers/lightnvm/pblk-write.c +++ b/drivers/lightnvm/pblk-write.c @@ -333,7 +333,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) m_ctx = nvm_rq_to_pdu(rqd); m_ctx->private = meta_line; - rq_len = rq_ppas * geo->sec_size; + rq_len = rq_ppas * geo->csecs; data = ((void *)emeta->buf) + emeta->mem; bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index f0309d8172c0..898c4e49f77d 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -551,21 +551,6 @@ struct pblk_line_meta { unsigned int meta_distance; /* Distance between data and metadata */ }; -struct pblk_addr_format { - u64 ch_mask; - u64 lun_mask; - u64 pln_mask; - u64 blk_mask; - u64 pg_mask; - u64 sec_mask; - u8 ch_offset; - u8 lun_offset; - u8 pln_offset; - u8 blk_offset; - u8 pg_offset; - u8 sec_offset; -}; - enum { PBLK_STATE_RUNNING = 0, PBLK_STATE_STOPPING = 1, @@ -585,8 +570,8 @@ struct pblk { struct pblk_line_mgmt l_mg; /* Line management */ struct pblk_line_meta lm; /* Line metadata */ + struct nvm_addrf ppaf; int ppaf_bitsize; - struct pblk_addr_format ppaf; struct pblk_rb rwb; @@ -941,14 +926,12 @@ static inline int pblk_line_vsc(struct pblk_line *line) 
return le32_to_cpu(*line->vsc); } -#define NVM_MEM_PAGE_WRITE (8) - static inline int pblk_pad_distance(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl; + return geo->mw_cunits * geo->all_luns * geo->ws_opt; } static inline int pblk_ppa_to_line(struct ppa_addr p) @@ -964,15 +947,16 @@ static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p) static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, u64 line_id) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; struct ppa_addr ppa; ppa.ppa = 0; ppa.g.blk = line_id; - ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset; - ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset; - ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset; - ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset; - ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset; + ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset; + ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset; + ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset; + ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset; + ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sect_offset; return ppa; } @@ -980,13 +964,14 @@ static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk, struct ppa_addr p) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; u64 paddr; - paddr = (u64)p.g.pg << pblk->ppaf.pg_offset; - paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset; - paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset; - paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset; - paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset; + paddr = (u64)p.g.ch << ppaf->ch_offset; + paddr |= (u64)p.g.lun << ppaf->lun_offset; + paddr |= (u64)p.g.pg << ppaf->pg_offset; + paddr |= (u64)p.g.pl << ppaf->pln_offset; + paddr |= (u64)p.g.sec << ppaf->sect_offset; return paddr; } @@ -1003,18 +988,14 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32) ppa64.c.line = ppa32 & ((~0U) >> 1); ppa64.c.is_cached = 1; } else { - ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >> - pblk->ppaf.blk_offset; - ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >> - pblk->ppaf.pg_offset; - ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >> - pblk->ppaf.lun_offset; - ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >> - pblk->ppaf.ch_offset; - ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >> - pblk->ppaf.pln_offset; - ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >> - pblk->ppaf.sec_offset; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + + ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> ppaf->ch_offset; + ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> ppaf->lun_offset; + ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> ppaf->blk_offset; + ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> ppaf->pg_offset; + ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> ppaf->pln_offset; + ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sect_offset; } return ppa64; @@ -1030,12 +1011,14 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64) ppa32 |= ppa64.c.line; ppa32 |= 1U << 31; } else { - ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset; - ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset; - ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset; - ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset; - ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset; - ppa32 |= ppa64.g.sec << 
pblk->ppaf.sec_offset; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + + ppa32 |= ppa64.g.ch << ppaf->ch_offset; + ppa32 |= ppa64.g.lun << ppaf->lun_offset; + ppa32 |= ppa64.g.blk << ppaf->blk_offset; + ppa32 |= ppa64.g.pg << ppaf->pg_offset; + ppa32 |= ppa64.g.pl << ppaf->pln_offset; + ppa32 |= ppa64.g.sec << ppaf->sect_offset; } return ppa32; @@ -1229,10 +1212,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev, if (!ppa->c.is_cached && ppa->g.ch < geo->nr_chnls && ppa->g.lun < geo->nr_luns && - ppa->g.pl < geo->nr_planes && + ppa->g.pl < geo->num_pln && ppa->g.blk < geo->nr_chks && - ppa->g.pg < geo->ws_per_chk && - ppa->g.sec < geo->sec_per_pg) + ppa->g.pg < geo->num_pg && + ppa->g.sec < geo->ws_min) continue; print_ppa(ppa, "boundary", i); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 839c0b96466a..29c8f44eb25b 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -152,8 +152,8 @@ struct nvme_nvm_id12_addrf { __u8 blk_len; __u8 pg_offset; __u8 pg_len; - __u8 sect_offset; - __u8 sect_len; + __u8 sec_offset; + __u8 sec_len; __u8 res[4]; } __packed; @@ -254,106 +254,160 @@ static inline void _nvme_nvm_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE); } -static int init_grp(struct nvm_id *nvm_id, struct nvme_nvm_id12 *id12) +static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst, + struct nvme_nvm_id12_addrf *src) +{ + dst->ch_len = src->ch_len; + dst->lun_len = src->lun_len; + dst->blk_len = src->blk_len; + dst->pg_len = src->pg_len; + dst->pln_len = src->pln_len; + dst->sect_len = src->sec_len; + + dst->ch_offset = src->ch_offset; + dst->lun_offset = src->lun_offset; + dst->blk_offset = src->blk_offset; + dst->pg_offset = src->pg_offset; + dst->pln_offset = src->pln_offset; + dst->sect_offset = src->sec_offset; + + dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset; + dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset; + dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset; + dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset; + dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset; + dst->sec_mask = ((1ULL << dst->sect_len) - 1) << dst->sect_offset; +} + +static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, + struct nvm_geo *geo) { struct nvme_nvm_id12_grp *src; int sec_per_pg, sec_per_pl, pg_per_blk; - if (id12->cgrps != 1) + if (id->cgrps != 1) + return -EINVAL; + + src = &id->grp; + + if (src->mtype != 0) { + pr_err("nvm: memory type not supported\n"); return -EINVAL; + } - src = &id12->grp; + geo->ver_id = id->ver_id; - nvm_id->mtype = src->mtype; - nvm_id->fmtype = src->fmtype; + geo->nr_chnls = src->num_ch; + geo->nr_luns = src->num_lun; + geo->all_luns = geo->nr_chnls * geo->nr_luns; - nvm_id->num_ch = src->num_ch; - nvm_id->num_lun = src->num_lun; + geo->nr_chks = le16_to_cpu(src->num_chk); - nvm_id->num_chk = le16_to_cpu(src->num_chk); - nvm_id->csecs = le16_to_cpu(src->csecs); - nvm_id->sos = le16_to_cpu(src->sos); + geo->csecs = le16_to_cpu(src->csecs); + geo->sos = le16_to_cpu(src->sos); pg_per_blk = le16_to_cpu(src->num_pg); - sec_per_pg = le16_to_cpu(src->fpg_sz) / nvm_id->csecs; + sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs; sec_per_pl = sec_per_pg * src->num_pln; - nvm_id->clba = sec_per_pl * pg_per_blk; - nvm_id->ws_per_chk = pg_per_blk; - - nvm_id->mpos = le32_to_cpu(src->mpos); - nvm_id->cpar = le16_to_cpu(src->cpar); - nvm_id->mccap = le32_to_cpu(src->mccap); - 
- nvm_id->ws_opt = nvm_id->ws_min = sec_per_pg; - nvm_id->ws_seq = NVM_IO_SNGL_ACCESS; - - if (nvm_id->mpos & 0x020202) { - nvm_id->ws_seq = NVM_IO_DUAL_ACCESS; - nvm_id->ws_opt <<= 1; - } else if (nvm_id->mpos & 0x040404) { - nvm_id->ws_seq = NVM_IO_QUAD_ACCESS; - nvm_id->ws_opt <<= 2; - } + geo->clba = sec_per_pl * pg_per_blk; + + geo->all_chunks = geo->all_luns * geo->nr_chks; + geo->total_secs = geo->clba * geo->all_chunks; + + geo->ws_min = sec_per_pg; + geo->ws_opt = sec_per_pg; + geo->mw_cunits = geo->ws_opt << 3; /* default to MLC safe values */ - nvm_id->trdt = le32_to_cpu(src->trdt); - nvm_id->trdm = le32_to_cpu(src->trdm); - nvm_id->tprt = le32_to_cpu(src->tprt); - nvm_id->tprm = le32_to_cpu(src->tprm); - nvm_id->tbet = le32_to_cpu(src->tbet); - nvm_id->tbem = le32_to_cpu(src->tbem); + geo->mccap = le32_to_cpu(src->mccap); + + geo->trdt = le32_to_cpu(src->trdt); + geo->trdm = le32_to_cpu(src->trdm); + geo->tprt = le32_to_cpu(src->tprt); + geo->tprm = le32_to_cpu(src->tprm); + geo->tbet = le32_to_cpu(src->tbet); + geo->tbem = le32_to_cpu(src->tbem); /* 1.2 compatibility */ - nvm_id->num_pln = src->num_pln; - nvm_id->num_pg = le16_to_cpu(src->num_pg); - nvm_id->fpg_sz = le16_to_cpu(src->fpg_sz); + geo->vmnt = id->vmnt; + geo->cap = le32_to_cpu(id->cap); + geo->dom = le32_to_cpu(id->dom); + + geo->mtype = src->mtype; + geo->fmtype = src->fmtype; + + geo->cpar = le16_to_cpu(src->cpar); + geo->mpos = le32_to_cpu(src->mpos); + + geo->plane_mode = NVM_PLANE_SINGLE; + + if (geo->mpos & 0x020202) { + geo->plane_mode = NVM_PLANE_DOUBLE; + geo->ws_opt <<= 1; + } else if (geo->mpos & 0x040404) { + geo->plane_mode = NVM_PLANE_QUAD; + geo->ws_opt <<= 2; + } + + geo->num_pln = src->num_pln; + geo->num_pg = le16_to_cpu(src->num_pg); + geo->fpg_sz = le16_to_cpu(src->fpg_sz); + + nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf); return 0; } -static int nvme_nvm_setup_12(struct nvm_dev *nvmdev, struct nvm_id *nvm_id, - struct nvme_nvm_id12 *id) +static void nvme_nvm_set_addr_20(struct nvm_addrf *dst, + struct nvme_nvm_id20_addrf *src) { - nvm_id->ver_id = id->ver_id; - nvm_id->vmnt = id->vmnt; - nvm_id->cap = le32_to_cpu(id->cap); - nvm_id->dom = le32_to_cpu(id->dom); - memcpy(&nvm_id->ppaf, &id->ppaf, - sizeof(struct nvm_addr_format)); - - return init_grp(nvm_id, id); + dst->ch_len = src->grp_len; + dst->lun_len = src->pu_len; + dst->chk_len = src->chk_len; + dst->sec_len = src->lba_len; + + dst->sec_offset = 0; + dst->chk_offset = dst->sec_len; + dst->lun_offset = dst->chk_offset + dst->chk_len; + dst->ch_offset = dst->lun_offset + dst->lun_len; + + dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset; + dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset; + dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset; + dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset; } -static int nvme_nvm_setup_20(struct nvm_dev *nvmdev, struct nvm_id *nvm_id, - struct nvme_nvm_id20 *id) +static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id, + struct nvm_geo *geo) { - nvm_id->ver_id = id->mjr; + geo->ver_id = id->mjr; - nvm_id->num_ch = le16_to_cpu(id->num_grp); - nvm_id->num_lun = le16_to_cpu(id->num_pu); - nvm_id->num_chk = le32_to_cpu(id->num_chk); - nvm_id->clba = le32_to_cpu(id->clba); + geo->nr_chnls = le16_to_cpu(id->num_grp); + geo->nr_luns = le16_to_cpu(id->num_pu); + geo->all_luns = geo->nr_chnls * geo->nr_luns; - nvm_id->ws_min = le32_to_cpu(id->ws_min); - nvm_id->ws_opt = le32_to_cpu(id->ws_opt); - nvm_id->mw_cunits = 
le32_to_cpu(id->mw_cunits); + geo->nr_chks = le32_to_cpu(id->num_chk); + geo->clba = le32_to_cpu(id->clba); - nvm_id->trdt = le32_to_cpu(id->trdt); - nvm_id->trdm = le32_to_cpu(id->trdm); - nvm_id->tprt = le32_to_cpu(id->twrt); - nvm_id->tprm = le32_to_cpu(id->twrm); - nvm_id->tbet = le32_to_cpu(id->tcrst); - nvm_id->tbem = le32_to_cpu(id->tcrsm); + geo->all_chunks = geo->all_luns * geo->nr_chks; + geo->total_secs = geo->clba * geo->all_chunks; - /* calculated values */ - nvm_id->ws_per_chk = nvm_id->clba / nvm_id->ws_min; + geo->ws_min = le32_to_cpu(id->ws_min); + geo->ws_opt = le32_to_cpu(id->ws_opt); + geo->mw_cunits = le32_to_cpu(id->mw_cunits); - /* 1.2 compatibility */ - nvm_id->ws_seq = NVM_IO_SNGL_ACCESS; + geo->trdt = le32_to_cpu(id->trdt); + geo->trdm = le32_to_cpu(id->trdm); + geo->tprt = le32_to_cpu(id->twrt); + geo->tprm = le32_to_cpu(id->twrm); + geo->tbet = le32_to_cpu(id->tcrst); + geo->tbem = le32_to_cpu(id->tcrsm); + + nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf); return 0; } -static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) +static int nvme_nvm_identity(struct nvm_dev *nvmdev) { struct nvme_ns *ns = nvmdev->q->queuedata; struct nvme_nvm_id12 *id; @@ -380,18 +434,18 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id) */ switch (id->ver_id) { case 1: - ret = nvme_nvm_setup_12(nvmdev, nvm_id, id); + ret = nvme_nvm_setup_12(id, &nvmdev->geo); break; case 2: - ret = nvme_nvm_setup_20(nvmdev, nvm_id, - (struct nvme_nvm_id20 *)id); + ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id, + &nvmdev->geo); break; default: - dev_err(ns->ctrl->device, - "OCSSD revision not supported (%d)\n", - nvm_id->ver_id); + dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n", + id->ver_id); ret = -EINVAL; } + out: kfree(id); return ret; @@ -406,7 +460,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_nvm_command c = {}; struct nvme_nvm_bb_tbl *bb_tbl; - int nr_blks = geo->nr_chks * geo->plane_mode; + int nr_blks = geo->nr_chks * geo->num_pln; int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks; int ret = 0; @@ -447,7 +501,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, goto out; } - memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode); + memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->num_pln); out: kfree(bb_tbl); return ret; @@ -815,9 +869,10 @@ int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg) void nvme_nvm_update_nvm_info(struct nvme_ns *ns) { struct nvm_dev *ndev = ns->ndev; + struct nvm_geo *geo = &ndev->geo; - ndev->identity.csecs = ndev->geo.sec_size = 1 << ns->lba_shift; - ndev->identity.sos = ndev->geo.oob_size = ns->ms; + geo->csecs = 1 << ns->lba_shift; + geo->sos = ns->ms; } int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node) @@ -850,23 +905,22 @@ static ssize_t nvm_dev_attr_show(struct device *dev, { struct nvme_ns *ns = nvme_get_ns_from_dev(dev); struct nvm_dev *ndev = ns->ndev; - struct nvm_id *id; + struct nvm_geo *geo = &ndev->geo; struct attribute *attr; if (!ndev) return 0; - id = &ndev->identity; attr = &dattr->attr; if (strcmp(attr->name, "version") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->ver_id); } else if (strcmp(attr->name, "capabilities") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->cap); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap); } else if 
(strcmp(attr->name, "read_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->trdt); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt); } else if (strcmp(attr->name, "read_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->trdm); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm); } else { return scnprintf(page, PAGE_SIZE, @@ -875,75 +929,78 @@ static ssize_t nvm_dev_attr_show(struct device *dev, } } +static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page) +{ + return scnprintf(page, PAGE_SIZE, + "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + ppaf->ch_offset, ppaf->ch_len, + ppaf->lun_offset, ppaf->lun_len, + ppaf->pln_offset, ppaf->pln_len, + ppaf->blk_offset, ppaf->blk_len, + ppaf->pg_offset, ppaf->pg_len, + ppaf->sect_offset, ppaf->sect_len); +} + static ssize_t nvm_dev_attr_show_12(struct device *dev, struct device_attribute *dattr, char *page) { struct nvme_ns *ns = nvme_get_ns_from_dev(dev); struct nvm_dev *ndev = ns->ndev; - struct nvm_id *id; + struct nvm_geo *geo = &ndev->geo; struct attribute *attr; if (!ndev) return 0; - id = &ndev->identity; attr = &dattr->attr; if (strcmp(attr->name, "vendor_opcode") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt); } else if (strcmp(attr->name, "device_mode") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->dom); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom); /* kept for compatibility */ } else if (strcmp(attr->name, "media_manager") == 0) { return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm"); } else if (strcmp(attr->name, "ppa_format") == 0) { - return scnprintf(page, PAGE_SIZE, - "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", - id->ppaf.ch_offset, id->ppaf.ch_len, - id->ppaf.lun_offset, id->ppaf.lun_len, - id->ppaf.pln_offset, id->ppaf.pln_len, - id->ppaf.blk_offset, id->ppaf.blk_len, - id->ppaf.pg_offset, id->ppaf.pg_len, - id->ppaf.sect_offset, id->ppaf.sect_len); + return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page); } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */ - return scnprintf(page, PAGE_SIZE, "%u\n", id->mtype); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype); } else if (strcmp(attr->name, "flash_media_type") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->fmtype); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype); } else if (strcmp(attr->name, "num_channels") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chnls); } else if (strcmp(attr->name, "num_luns") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_luns); } else if (strcmp(attr->name, "num_planes") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pln); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln); } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */ - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chks); } else if (strcmp(attr->name, "num_pages") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_pg); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg); } else if (strcmp(attr->name, "page_size") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->fpg_sz); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz); } else if (strcmp(attr->name, "hw_sector_size") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->csecs); + return 
scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs); } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */ - return scnprintf(page, PAGE_SIZE, "%u\n", id->sos); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos); } else if (strcmp(attr->name, "prog_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt); } else if (strcmp(attr->name, "prog_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm); } else if (strcmp(attr->name, "erase_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet); } else if (strcmp(attr->name, "erase_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem); } else if (strcmp(attr->name, "multiplane_modes") == 0) { - return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mpos); + return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos); } else if (strcmp(attr->name, "media_capabilities") == 0) { - return scnprintf(page, PAGE_SIZE, "0x%08x\n", id->mccap); + return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap); } else if (strcmp(attr->name, "max_phys_secs") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA); } else { - return scnprintf(page, - PAGE_SIZE, - "Unhandled attr(%s) in `nvm_dev_attr_show_12`\n", - attr->name); + return scnprintf(page, PAGE_SIZE, + "Unhandled attr(%s) in `nvm_dev_attr_show_12`\n", + attr->name); } } @@ -952,42 +1009,40 @@ static ssize_t nvm_dev_attr_show_20(struct device *dev, { struct nvme_ns *ns = nvme_get_ns_from_dev(dev); struct nvm_dev *ndev = ns->ndev; - struct nvm_id *id; + struct nvm_geo *geo = &ndev->geo; struct attribute *attr; if (!ndev) return 0; - id = &ndev->identity; attr = &dattr->attr; if (strcmp(attr->name, "groups") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_ch); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chnls); } else if (strcmp(attr->name, "punits") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_lun); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_luns); } else if (strcmp(attr->name, "chunks") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->num_chk); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chks); } else if (strcmp(attr->name, "clba") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->clba); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba); } else if (strcmp(attr->name, "ws_min") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->ws_min); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min); } else if (strcmp(attr->name, "ws_opt") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->ws_opt); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt); } else if (strcmp(attr->name, "mw_cunits") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->mw_cunits); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits); } else if (strcmp(attr->name, "write_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->tprt); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt); } else if (strcmp(attr->name, "write_max") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->tprm); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm); } else if (strcmp(attr->name, "reset_typ") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", id->tbet); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet); } else if (strcmp(attr->name, "reset_max") == 0) { - 
return scnprintf(page, PAGE_SIZE, "%u\n", id->tbem); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem); } else { - return scnprintf(page, - PAGE_SIZE, - "Unhandled attr(%s) in `nvm_dev_attr_show_20`\n", - attr->name); + return scnprintf(page, PAGE_SIZE, + "Unhandled attr(%s) in `nvm_dev_attr_show_20`\n", + attr->name); } } @@ -1106,10 +1161,13 @@ static const struct attribute_group nvm_dev_attr_group_20 = { int nvme_nvm_register_sysfs(struct nvme_ns *ns) { - if (!ns->ndev) + struct nvm_dev *ndev = ns->ndev; + struct nvm_geo *geo = &ndev->geo; + + if (!ndev) return -EINVAL; - switch (ns->ndev->identity.ver_id) { + switch (geo->ver_id) { case 1: return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, &nvm_dev_attr_group_12); @@ -1123,7 +1181,10 @@ int nvme_nvm_register_sysfs(struct nvme_ns *ns) void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) { - switch (ns->ndev->identity.ver_id) { + struct nvm_dev *ndev = ns->ndev; + struct nvm_geo *geo = &ndev->geo; + + switch (geo->ver_id) { case 1: sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, &nvm_dev_attr_group_12); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index e55b10573c99..6e650563b379 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -50,7 +50,7 @@ struct nvm_id; struct nvm_dev; struct nvm_tgt_dev; -typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); +typedef int (nvm_id_fn)(struct nvm_dev *); typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); @@ -152,62 +152,48 @@ struct nvm_id_lp_tbl { struct nvm_id_lp_mlc mlc; }; -struct nvm_addr_format { - u8 ch_offset; +struct nvm_addrf_12 { u8 ch_len; - u8 lun_offset; u8 lun_len; - u8 pln_offset; - u8 pln_len; - u8 blk_offset; u8 blk_len; - u8 pg_offset; u8 pg_len; - u8 sect_offset; + u8 pln_len; u8 sect_len; -}; - -struct nvm_id { - u8 ver_id; - u8 vmnt; - u32 cap; - u32 dom; - struct nvm_addr_format ppaf; - - u8 num_ch; - u8 num_lun; - u16 num_chk; - u16 clba; - u16 csecs; - u16 sos; - - u32 ws_min; - u32 ws_opt; - u32 mw_cunits; - - u32 trdt; - u32 trdm; - u32 tprt; - u32 tprm; - u32 tbet; - u32 tbem; - u32 mpos; - u32 mccap; - u16 cpar; + u8 ch_offset; + u8 lun_offset; + u8 blk_offset; + u8 pg_offset; + u8 pln_offset; + u8 sect_offset; - /* calculated values */ - u16 ws_seq; - u16 ws_per_chk; + u64 ch_mask; + u64 lun_mask; + u64 blk_mask; + u64 pg_mask; + u64 pln_mask; + u64 sec_mask; +}; - /* 1.2 compatibility */ - u8 mtype; - u8 fmtype; +struct nvm_addrf { + u8 ch_len; + u8 lun_len; + u8 chk_len; + u8 sec_len; + u8 rsv_len[2]; - u8 num_pln; - u16 num_pg; - u16 fpg_sz; -} __packed; + u8 ch_offset; + u8 lun_offset; + u8 chk_offset; + u8 sec_offset; + u8 rsv_off[2]; + + u64 ch_mask; + u64 lun_mask; + u64 chk_mask; + u64 sec_mask; + u64 rsv_mask[2]; +}; struct nvm_target { struct list_head list; @@ -274,36 +260,63 @@ enum { NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; - -/* Device generic information */ +/* Instance geometry */ struct nvm_geo { - /* generic geometry */ + /* device reported version */ + u8 ver_id; + + /* instance specific geometry */ int nr_chnls; - int all_luns; /* across channels */ - int nr_luns; /* per channel */ - int nr_chks; /* per lun */ + int nr_luns; /* per channel */ - int sec_size; - int oob_size; - int mccap; + /* calculated values */ + int all_luns; /* across channels */ + int all_chunks; /* across channels */ + + int op; /* over-provision in instance 
*/ + + sector_t total_secs; /* across channels */ + + /* chunk geometry */ + u32 nr_chks; /* chunks per lun */ + u32 clba; /* sectors per chunk */ + u16 csecs; /* sector size */ + u16 sos; /* out-of-band area size */ - int sec_per_chk; - int sec_per_lun; + /* device write constraints */ + u32 ws_min; /* minimum write size */ + u32 ws_opt; /* optimal write size */ + u32 mw_cunits; /* distance required for successful read */ - int ws_min; - int ws_opt; - int ws_seq; - int ws_per_chk; + /* device capabilities */ + u32 mccap; - int op; + /* device timings */ + u32 trdt; /* Avg. Tread (ns) */ + u32 trdm; /* Max Tread (ns) */ + u32 tprt; /* Avg. Tprog (ns) */ + u32 tprm; /* Max Tprog (ns) */ + u32 tbet; /* Avg. Terase (ns) */ + u32 tbem; /* Max Terase (ns) */ - struct nvm_addr_format ppaf; + /* generic address format */ + struct nvm_addrf addrf; - /* Legacy 1.2 specific geometry */ - int plane_mode; /* drive device in single, double or quad mode */ - int nr_planes; - int sec_per_pg; /* only sectors for a single page */ - int sec_per_pl; /* all sectors across planes */ + /* 1.2 compatibility */ + u8 vmnt; + u32 cap; + u32 dom; + + u8 mtype; + u8 fmtype; + + u16 cpar; + u32 mpos; + + u8 num_pln; + u8 plane_mode; + u16 num_pg; + u16 fpg_sz; }; /* sub-device structure */ @@ -314,9 +327,6 @@ struct nvm_tgt_dev { /* Base ppas for target LUNs */ struct ppa_addr *luns; - sector_t total_secs; - - struct nvm_id identity; struct request_queue *q; struct nvm_dev *parent; @@ -331,13 +341,9 @@ struct nvm_dev { /* Device information */ struct nvm_geo geo; - unsigned long total_secs; - unsigned long *lun_map; void *dma_pool; - struct nvm_id identity; - /* Backend device */ struct request_queue *q; char name[DISK_NAME_LEN]; @@ -357,14 +363,15 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, struct ppa_addr r) { struct nvm_geo *geo = &tgt_dev->geo; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; struct ppa_addr l; - l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset; - l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset; - l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset; - l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset; - l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset; - l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset; + l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; + l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; + l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; + l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; + l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; + l.ppa |= ((u64)r.g.sec) << ppaf->sect_offset; return l; } @@ -373,24 +380,17 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, struct ppa_addr r) { struct nvm_geo *geo = &tgt_dev->geo; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; struct ppa_addr l; l.ppa = 0; - /* - * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
- */ - l.g.blk = (r.ppa >> geo->ppaf.blk_offset) & - (((1 << geo->ppaf.blk_len) - 1)); - l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) & - (((1 << geo->ppaf.pg_len) - 1)); - l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) & - (((1 << geo->ppaf.sect_len) - 1)); - l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) & - (((1 << geo->ppaf.pln_len) - 1)); - l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) & - (((1 << geo->ppaf.lun_len) - 1)); - l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) & - (((1 << geo->ppaf.ch_len) - 1)); + + l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; + l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; + l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; + l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; + l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; + l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sect_offset; return l; } -- cgit v1.2.3 From 3cb98f84d368b3bbe07a2d5bf938e31f74567620 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:11 +0200 Subject: lightnvm: add minor version to generic geometry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Separate the version between major and minor on the generic geometry and represent it through sysfs in the 2.0 path. The 1.2 path only shows the major version to preserve the existing user space interface. Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 4 ++-- drivers/nvme/host/lightnvm.c | 25 ++++++++++++++++++++----- include/linux/lightnvm.h | 3 ++- 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 9dec936ac1dc..103e0ad9622c 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -890,8 +890,8 @@ static int nvm_init(struct nvm_dev *dev) goto err; } - pr_debug("nvm: ver:%u nvm_vendor:%x\n", - geo->ver_id, + pr_debug("nvm: ver:%u.%u nvm_vendor:%x\n", + geo->major_ver_id, geo->minor_ver_id, geo->vmnt); ret = nvm_core_init(dev); diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 29c8f44eb25b..de4105544956 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -295,7 +295,9 @@ static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, return -EINVAL; } - geo->ver_id = id->ver_id; + /* 1.2 spec. 
only reports a single version id - unfold */ + geo->major_ver_id = id->ver_id; + geo->minor_ver_id = 2; geo->nr_chnls = src->num_ch; geo->nr_luns = src->num_lun; @@ -379,7 +381,14 @@ static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id, struct nvm_geo *geo) { - geo->ver_id = id->mjr; + geo->major_ver_id = id->mjr; + geo->minor_ver_id = id->mnr; + + if (!(geo->major_ver_id == 2 && geo->minor_ver_id == 0)) { + pr_err("nvm: OCSSD version not supported (v%d.%d)\n", + geo->major_ver_id, geo->minor_ver_id); + return -EINVAL; + } geo->nr_chnls = le16_to_cpu(id->num_grp); geo->nr_luns = le16_to_cpu(id->num_pu); @@ -914,7 +923,13 @@ static ssize_t nvm_dev_attr_show(struct device *dev, attr = &dattr->attr; if (strcmp(attr->name, "version") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", geo->ver_id); + if (geo->major_ver_id == 1) + return scnprintf(page, PAGE_SIZE, "%u\n", + geo->major_ver_id); + else + return scnprintf(page, PAGE_SIZE, "%u.%u\n", + geo->major_ver_id, + geo->minor_ver_id); } else if (strcmp(attr->name, "capabilities") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap); } else if (strcmp(attr->name, "read_typ") == 0) { @@ -1167,7 +1182,7 @@ int nvme_nvm_register_sysfs(struct nvme_ns *ns) if (!ndev) return -EINVAL; - switch (geo->ver_id) { + switch (geo->major_ver_id) { case 1: return sysfs_create_group(&disk_to_dev(ns->disk)->kobj, &nvm_dev_attr_group_12); @@ -1184,7 +1199,7 @@ void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) { - switch (ns->ndev->identity.ver_id) { + struct nvm_dev *ndev = ns->ndev; + struct nvm_geo *geo = &ndev->geo; + + switch (geo->major_ver_id) { case 1: sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, &nvm_dev_attr_group_12); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 6e650563b379..7ed8b92d6744 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -263,7 +263,8 @@ enum { /* Instance geometry */ struct nvm_geo { /* device reported version */ - u8 ver_id; + u8 major_ver_id; + u8 minor_ver_id; /* instance specific geometry */ int nr_chnls; -- cgit v1.2.3 From f1d4e8121f3fc25f9be94c6de6b8f5f788ad0265 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:12 +0200 Subject: lightnvm: add shortened OCSSD version in geo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Create a shortened version to use in the generic geometry.
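For illustration, upper layers can then gate spec-specific paths on this single compacted field instead of comparing major/minor pairs each time. A minimal sketch, assuming a populated struct nvm_geo; tgt_check_version() is a hypothetical helper, not part of this patch:

	/* Sketch only: branch on the compacted version field. */
	static int tgt_check_version(struct nvm_geo *geo)
	{
		switch (geo->version) {
		case NVM_OCSSD_SPEC_12:
			return 0;	/* take 1.2-only paths */
		case NVM_OCSSD_SPEC_20:
			return 0;	/* take 2.0-only paths */
		default:
			return -EINVAL;	/* unknown spec revision */
		}
	}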
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/nvme/host/lightnvm.c | 6 ++++++ include/linux/lightnvm.h | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index de4105544956..f7f7769e7588 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -299,6 +299,9 @@ static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, geo->major_ver_id = id->ver_id; geo->minor_ver_id = 2; + /* Set compacted version for upper layers */ + geo->version = NVM_OCSSD_SPEC_12; + geo->nr_chnls = src->num_ch; geo->nr_luns = src->num_lun; geo->all_luns = geo->nr_chnls * geo->nr_luns; @@ -384,6 +387,9 @@ static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id, geo->major_ver_id = id->mjr; geo->minor_ver_id = id->mnr; + /* Set compacted version for upper layers */ + geo->version = NVM_OCSSD_SPEC_20; + if (!(geo->major_ver_id == 2 && geo->minor_ver_id == 0)) { pr_err("nvm: OCSSD version not supported (v%d.%d)\n", geo->major_ver_id, geo->minor_ver_id); return -EINVAL; } diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 7ed8b92d6744..a073c0c76260 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -23,6 +23,11 @@ enum { #define NVM_LUN_BITS (8) #define NVM_CH_BITS (7) +enum { + NVM_OCSSD_SPEC_12 = 12, + NVM_OCSSD_SPEC_20 = 20, +}; + struct ppa_addr { /* Generic structure for all addresses */ union { @@ -266,6 +271,9 @@ struct nvm_geo { u8 major_ver_id; u8 minor_ver_id; + /* kernel short version */ + u8 version; + /* instance specific geometry */ int nr_chnls; int nr_luns; /* per channel */ -- cgit v1.2.3 From 3f48021bad73696421e2725c856b9b3aec7f567c Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:13 +0200 Subject: lightnvm: complete geo structure with maxoc* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Complete the generic geometry structure with the maxoc and maxocpu fields, present in the 2.0 spec. Also, expose them through sysfs. Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/nvme/host/lightnvm.c | 17 +++++++++++++++++ include/linux/lightnvm.h | 2 ++ 2 files changed, 19 insertions(+) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index f7f7769e7588..41b38ebdb1f3 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -323,6 +323,13 @@ static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, geo->ws_opt = sec_per_pg; geo->mw_cunits = geo->ws_opt << 3; /* default to MLC safe values */ + /* Do not impose values for maximum number of open blocks as it is + * unspecified in 1.2. Users of 1.2 must be aware of this and eventually + * specify these values through a quirk if restrictions apply.
+ */ + geo->maxoc = geo->all_luns * geo->nr_chks; + geo->maxocpu = geo->nr_chks; + geo->mccap = le32_to_cpu(src->mccap); geo->trdt = le32_to_cpu(src->trdt); @@ -409,6 +416,8 @@ static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id, geo->ws_min = le32_to_cpu(id->ws_min); geo->ws_opt = le32_to_cpu(id->ws_opt); geo->mw_cunits = le32_to_cpu(id->mw_cunits); + geo->maxoc = le32_to_cpu(id->maxoc); + geo->maxocpu = le32_to_cpu(id->maxocpu); geo->trdt = le32_to_cpu(id->trdt); geo->trdm = le32_to_cpu(id->trdm); @@ -1050,6 +1059,10 @@ static ssize_t nvm_dev_attr_show_20(struct device *dev, return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min); } else if (strcmp(attr->name, "ws_opt") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt); + } else if (strcmp(attr->name, "maxoc") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc); + } else if (strcmp(attr->name, "maxocpu") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu); } else if (strcmp(attr->name, "mw_cunits") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits); } else if (strcmp(attr->name, "write_typ") == 0) { @@ -1147,6 +1160,8 @@ static NVM_DEV_ATTR_20_RO(chunks); static NVM_DEV_ATTR_20_RO(clba); static NVM_DEV_ATTR_20_RO(ws_min); static NVM_DEV_ATTR_20_RO(ws_opt); +static NVM_DEV_ATTR_20_RO(maxoc); +static NVM_DEV_ATTR_20_RO(maxocpu); static NVM_DEV_ATTR_20_RO(mw_cunits); static NVM_DEV_ATTR_20_RO(write_typ); static NVM_DEV_ATTR_20_RO(write_max); @@ -1163,6 +1178,8 @@ static struct attribute *nvm_dev_attrs_20[] = { &dev_attr_clba.attr, &dev_attr_ws_min.attr, &dev_attr_ws_opt.attr, + &dev_attr_maxoc.attr, + &dev_attr_maxocpu.attr, &dev_attr_mw_cunits.attr, &dev_attr_read_typ.attr, diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index a073c0c76260..870959a58fef 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -296,6 +296,8 @@ struct nvm_geo { u32 ws_min; /* minimum write size */ u32 ws_opt; /* optimal write size */ u32 mw_cunits; /* distance required for successful read */ + u32 maxoc; /* maximum open chunks */ + u32 maxocpu; /* maximum open chunks per parallel unit */ /* device capabilities */ u32 mccap; -- cgit v1.2.3 From a40afad90b9a253b282183eb9365f1cc14aeff77 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:14 +0200 Subject: lightnvm: normalize geometry nomenclature MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Normalize nomenclature for naming channels, luns, chunks, planes and sectors as well as derivations in order to improve readability. 
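For reference, the main renames applied in the hunks below are:

	nr_chnls    -> num_ch
	nr_luns     -> num_lun
	nr_chks     -> num_chk
	plane_mode  -> pln_mode
	sect_len    -> sec_len
	sect_offset -> sec_offset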
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 89 +++++++++++++++++++++---------------------- drivers/lightnvm/pblk-core.c | 4 +- drivers/lightnvm/pblk-init.c | 30 +++++++-------- drivers/lightnvm/pblk-sysfs.c | 4 +- drivers/lightnvm/pblk.h | 20 +++++----- drivers/nvme/host/lightnvm.c | 54 +++++++++++++------------- include/linux/lightnvm.h | 16 ++++---- 7 files changed, 108 insertions(+), 109 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 103e0ad9622c..94b3b423840b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -36,13 +36,13 @@ static DECLARE_RWSEM(nvm_lock); /* Map between virtual and physical channel and lun */ struct nvm_ch_map { int ch_off; - int nr_luns; + int num_lun; int *lun_offs; }; struct nvm_dev_map { struct nvm_ch_map *chnls; - int nr_chnls; + int num_ch; }; static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name) @@ -114,15 +114,15 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear) struct nvm_dev_map *dev_map = tgt_dev->map; int i, j; - for (i = 0; i < dev_map->nr_chnls; i++) { + for (i = 0; i < dev_map->num_ch; i++) { struct nvm_ch_map *ch_map = &dev_map->chnls[i]; int *lun_offs = ch_map->lun_offs; int ch = i + ch_map->ch_off; if (clear) { - for (j = 0; j < ch_map->nr_luns; j++) { + for (j = 0; j < ch_map->num_lun; j++) { int lun = j + lun_offs[j]; - int lunid = (ch * dev->geo.nr_luns) + lun; + int lunid = (ch * dev->geo.num_lun) + lun; WARN_ON(!test_and_clear_bit(lunid, dev->lun_map)); @@ -147,47 +147,46 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev, struct nvm_dev_map *dev_rmap = dev->rmap; struct nvm_dev_map *dev_map; struct ppa_addr *luns; - int nr_luns = lun_end - lun_begin + 1; - int luns_left = nr_luns; - int nr_chnls = nr_luns / dev->geo.nr_luns; - int nr_chnls_mod = nr_luns % dev->geo.nr_luns; - int bch = lun_begin / dev->geo.nr_luns; - int blun = lun_begin % dev->geo.nr_luns; + int num_lun = lun_end - lun_begin + 1; + int luns_left = num_lun; + int num_ch = num_lun / dev->geo.num_lun; + int num_ch_mod = num_lun % dev->geo.num_lun; + int bch = lun_begin / dev->geo.num_lun; + int blun = lun_begin % dev->geo.num_lun; int lunid = 0; int lun_balanced = 1; - int sec_per_lun, prev_nr_luns; + int sec_per_lun, prev_num_lun; int i, j; - nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1; + num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1; dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL); if (!dev_map) goto err_dev; - dev_map->chnls = kcalloc(nr_chnls, sizeof(struct nvm_ch_map), - GFP_KERNEL); + dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL); if (!dev_map->chnls) goto err_chnls; - luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL); + luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL); if (!luns) goto err_luns; - prev_nr_luns = (luns_left > dev->geo.nr_luns) ? - dev->geo.nr_luns : luns_left; - for (i = 0; i < nr_chnls; i++) { + prev_num_lun = (luns_left > dev->geo.num_lun) ? + dev->geo.num_lun : luns_left; + for (i = 0; i < num_ch; i++) { struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch]; int *lun_roffs = ch_rmap->lun_offs; struct nvm_ch_map *ch_map = &dev_map->chnls[i]; int *lun_offs; - int luns_in_chnl = (luns_left > dev->geo.nr_luns) ? - dev->geo.nr_luns : luns_left; + int luns_in_chnl = (luns_left > dev->geo.num_lun) ? 
+ dev->geo.num_lun : luns_left; - if (lun_balanced && prev_nr_luns != luns_in_chnl) + if (lun_balanced && prev_num_lun != luns_in_chnl) lun_balanced = 0; ch_map->ch_off = ch_rmap->ch_off = bch; - ch_map->nr_luns = luns_in_chnl; + ch_map->num_lun = luns_in_chnl; lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL); if (!lun_offs) @@ -209,7 +208,7 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev, luns_left -= luns_in_chnl; } - dev_map->nr_chnls = nr_chnls; + dev_map->num_ch = num_ch; tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL); if (!tgt_dev) @@ -219,15 +218,15 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev, memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo)); /* Target device only owns a portion of the physical device */ - tgt_dev->geo.nr_chnls = nr_chnls; - tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1; - tgt_dev->geo.all_luns = nr_luns; - tgt_dev->geo.all_chunks = nr_luns * dev->geo.nr_chks; + tgt_dev->geo.num_ch = num_ch; + tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1; + tgt_dev->geo.all_luns = num_lun; + tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk; tgt_dev->geo.op = op; - sec_per_lun = dev->geo.clba * dev->geo.nr_chks; - tgt_dev->geo.total_secs = nr_luns * sec_per_lun; + sec_per_lun = dev->geo.clba * dev->geo.num_chk; + tgt_dev->geo.total_secs = num_lun * sec_per_lun; tgt_dev->q = dev->q; tgt_dev->map = dev_map; @@ -505,20 +504,20 @@ static int nvm_register_map(struct nvm_dev *dev) if (!rmap) goto err_rmap; - rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct nvm_ch_map), + rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL); if (!rmap->chnls) goto err_chnls; - for (i = 0; i < dev->geo.nr_chnls; i++) { + for (i = 0; i < dev->geo.num_ch; i++) { struct nvm_ch_map *ch_rmap; int *lun_roffs; - int luns_in_chnl = dev->geo.nr_luns; + int luns_in_chnl = dev->geo.num_lun; ch_rmap = &rmap->chnls[i]; ch_rmap->ch_off = -1; - ch_rmap->nr_luns = luns_in_chnl; + ch_rmap->num_lun = luns_in_chnl; lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL); if (!lun_roffs) @@ -547,7 +546,7 @@ static void nvm_unregister_map(struct nvm_dev *dev) struct nvm_dev_map *rmap = dev->rmap; int i; - for (i = 0; i < dev->geo.nr_chnls; i++) + for (i = 0; i < dev->geo.num_ch; i++) kfree(rmap->chnls[i].lun_offs); kfree(rmap->chnls); @@ -676,7 +675,7 @@ static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, int i, plane_cnt, pl_idx; struct ppa_addr ppa; - if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) { + if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) { rqd->nr_ppas = nr_ppas; rqd->ppa_addr = ppas[0]; @@ -690,7 +689,7 @@ static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, return -ENOMEM; } - plane_cnt = geo->plane_mode; + plane_cnt = geo->pln_mode; rqd->nr_ppas *= plane_cnt; for (i = 0; i < nr_ppas; i++) { @@ -808,15 +807,15 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks) struct nvm_geo *geo = &dev->geo; int blk, offset, pl, blktype; - if (nr_blks != geo->nr_chks * geo->plane_mode) + if (nr_blks != geo->num_chk * geo->pln_mode) return -EINVAL; - for (blk = 0; blk < geo->nr_chks; blk++) { - offset = blk * geo->plane_mode; + for (blk = 0; blk < geo->num_chk; blk++) { + offset = blk * geo->pln_mode; blktype = blks[offset]; /* Bad blocks on any planes take precedence over other types */ - for (pl = 0; pl < geo->plane_mode; pl++) { + for (pl = 0; pl < geo->pln_mode; pl++) { if (blks[offset + pl] & 
(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) { blktype = blks[offset + pl]; @@ -827,7 +826,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks) blks[blk] = blktype; } - return geo->nr_chks; + return geo->num_chk; } EXPORT_SYMBOL(nvm_bb_tbl_fold); @@ -901,9 +900,9 @@ static int nvm_init(struct nvm_dev *dev) } pr_info("nvm: registered %s [%u/%u/%u/%u/%u]\n", - dev->name, geo->ws_min, geo->ws_opt, - geo->nr_chks, geo->all_luns, - geo->nr_chnls); + dev->name, dev->geo.ws_min, dev->geo.ws_opt, + dev->geo.num_chk, dev->geo.all_luns, + dev->geo.num_ch); return 0; err: pr_err("nvm: failed to initialize nvm\n"); diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 52c0c3e5ec6e..64c87dd4f1cd 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -1742,10 +1742,10 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; struct pblk_lun *rlun; - int nr_luns = geo->all_luns; + int num_lun = geo->all_luns; int bit = -1; - while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) { + while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) { rlun = &pblk->luns[bit]; up(&rlun->wr_sem); } diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 2fca27d0a9b5..4656d1ff81a6 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -193,15 +193,15 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst) int power_len; /* Re-calculate channel and lun format to adapt to configuration */ - power_len = get_count_order(geo->nr_chnls); - if (1 << power_len != geo->nr_chnls) { + power_len = get_count_order(geo->num_ch); + if (1 << power_len != geo->num_ch) { pr_err("pblk: supports only power-of-two channel config.\n"); return -EINVAL; } dst->ch_len = power_len; - power_len = get_count_order(geo->nr_luns); - if (1 << power_len != geo->nr_luns) { + power_len = get_count_order(geo->num_lun); + if (1 << power_len != geo->num_lun) { pr_err("pblk: supports only power-of-two LUN config.\n"); return -EINVAL; } @@ -210,16 +210,16 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst) dst->blk_len = src->blk_len; dst->pg_len = src->pg_len; dst->pln_len = src->pln_len; - dst->sect_len = src->sect_len; + dst->sec_len = src->sec_len; - dst->sect_offset = 0; - dst->pln_offset = dst->sect_len; + dst->sec_offset = 0; + dst->pln_offset = dst->sec_len; dst->ch_offset = dst->pln_offset + dst->pln_len; dst->lun_offset = dst->ch_offset + dst->ch_len; dst->pg_offset = dst->lun_offset + dst->lun_len; dst->blk_offset = dst->pg_offset + dst->pg_len; - dst->sec_mask = ((1ULL << dst->sect_len) - 1) << dst->sect_offset; + dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset; dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset; dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset; dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset; @@ -503,7 +503,7 @@ static void *pblk_bb_get_log(struct pblk *pblk) int i, nr_blks, blk_per_lun; int ret; - blk_per_lun = geo->nr_chks * geo->plane_mode; + blk_per_lun = geo->num_chk * geo->pln_mode; nr_blks = blk_per_lun * geo->all_luns; log = kmalloc(nr_blks, GFP_KERNEL); @@ -530,7 +530,7 @@ static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line, struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; int i, bb_cnt = 0; - int blk_per_lun = geo->nr_chks * geo->plane_mode; + int blk_per_lun 
= geo->num_chk * geo->pln_mode; for (i = 0; i < blk_per_line; i++) { struct pblk_lun *rlun = &pblk->luns[i]; @@ -554,7 +554,7 @@ static int pblk_luns_init(struct pblk *pblk) int i; /* TODO: Implement unbalanced LUN support */ - if (geo->nr_luns < 0) { + if (geo->num_lun < 0) { pr_err("pblk: unbalanced LUN config.\n"); return -EINVAL; } @@ -566,9 +566,9 @@ static int pblk_luns_init(struct pblk *pblk) for (i = 0; i < geo->all_luns; i++) { /* Stripe across channels */ - int ch = i % geo->nr_chnls; - int lun_raw = i / geo->nr_chnls; - int lunid = lun_raw + ch * geo->nr_luns; + int ch = i % geo->num_ch; + int lun_raw = i / geo->num_ch; + int lunid = lun_raw + ch * geo->num_lun; rlun = &pblk->luns[i]; rlun->bppa = dev->luns[lunid]; @@ -672,7 +672,7 @@ static int pblk_line_mg_init(struct pblk *pblk) struct pblk_line_meta *lm = &pblk->lm; int i, bb_distance; - l_mg->nr_lines = geo->nr_chks; + l_mg->nr_lines = geo->num_chk; l_mg->log_line = l_mg->data_line = NULL; l_mg->l_seq_nr = l_mg->d_seq_nr = 0; l_mg->nr_free_lines = 0; diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 2474ef4366fa..3e9364f60b44 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -128,7 +128,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) ppaf->lun_offset, ppaf->lun_len, ppaf->ch_offset, ppaf->ch_len, ppaf->pln_offset, ppaf->pln_len, - ppaf->sect_offset, ppaf->sect_len); + ppaf->sec_offset, ppaf->sec_len); sz += snprintf(page + sz, PAGE_SIZE - sz, "d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", @@ -137,7 +137,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) geo_ppaf->lun_offset, geo_ppaf->lun_len, geo_ppaf->ch_offset, geo_ppaf->ch_len, geo_ppaf->pln_offset, geo_ppaf->pln_len, - geo_ppaf->sect_offset, geo_ppaf->sect_len); + geo_ppaf->sec_offset, geo_ppaf->sec_len); return sz; } diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 898c4e49f77d..dcdad255ccb5 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -941,7 +941,7 @@ static inline int pblk_ppa_to_line(struct ppa_addr p) static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p) { - return p.g.lun * geo->nr_chnls + p.g.ch; + return p.g.lun * geo->num_ch + p.g.ch; } static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, @@ -956,7 +956,7 @@ static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset; ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset; ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset; - ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sect_offset; + ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset; return ppa; } @@ -971,7 +971,7 @@ static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk, paddr |= (u64)p.g.lun << ppaf->lun_offset; paddr |= (u64)p.g.pg << ppaf->pg_offset; paddr |= (u64)p.g.pl << ppaf->pln_offset; - paddr |= (u64)p.g.sec << ppaf->sect_offset; + paddr |= (u64)p.g.sec << ppaf->sec_offset; return paddr; } @@ -995,7 +995,7 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32) ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> ppaf->blk_offset; ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> ppaf->pg_offset; ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> ppaf->pln_offset; - ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sect_offset; + ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sec_offset; } return ppa64; @@ -1018,7 +1018,7 @@ static inline u32 
pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64) ppa32 |= ppa64.g.blk << ppaf->blk_offset; ppa32 |= ppa64.g.pg << ppaf->pg_offset; ppa32 |= ppa64.g.pl << ppaf->pln_offset; - ppa32 |= ppa64.g.sec << ppaf->sect_offset; + ppa32 |= ppa64.g.sec << ppaf->sec_offset; } return ppa32; @@ -1136,7 +1136,7 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type) struct nvm_geo *geo = &dev->geo; int flags; - flags = geo->plane_mode >> 1; + flags = geo->pln_mode >> 1; if (type == PBLK_WRITE) flags |= NVM_IO_SCRAMBLE_ENABLE; @@ -1157,7 +1157,7 @@ static inline int pblk_set_read_mode(struct pblk *pblk, int type) flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE; if (type == PBLK_READ_SEQUENTIAL) - flags |= geo->plane_mode >> 1; + flags |= geo->pln_mode >> 1; return flags; } @@ -1210,10 +1210,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev, ppa = &ppas[i]; if (!ppa->c.is_cached && - ppa->g.ch < geo->nr_chnls && - ppa->g.lun < geo->nr_luns && + ppa->g.ch < geo->num_ch && + ppa->g.lun < geo->num_lun && ppa->g.pl < geo->num_pln && - ppa->g.blk < geo->nr_chks && + ppa->g.blk < geo->num_chk && ppa->g.pg < geo->num_pg && ppa->g.sec < geo->ws_min) continue; diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 41b38ebdb1f3..08f0f6b5bc06 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -262,21 +262,21 @@ static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst, dst->blk_len = src->blk_len; dst->pg_len = src->pg_len; dst->pln_len = src->pln_len; - dst->sect_len = src->sec_len; + dst->sec_len = src->sec_len; dst->ch_offset = src->ch_offset; dst->lun_offset = src->lun_offset; dst->blk_offset = src->blk_offset; dst->pg_offset = src->pg_offset; dst->pln_offset = src->pln_offset; - dst->sect_offset = src->sec_offset; + dst->sec_offset = src->sec_offset; dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset; dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset; dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset; dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset; dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset; - dst->sec_mask = ((1ULL << dst->sect_len) - 1) << dst->sect_offset; + dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset; } static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, @@ -302,11 +302,11 @@ static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, /* Set compacted version for upper layers */ geo->version = NVM_OCSSD_SPEC_12; - geo->nr_chnls = src->num_ch; - geo->nr_luns = src->num_lun; - geo->all_luns = geo->nr_chnls * geo->nr_luns; + geo->num_ch = src->num_ch; + geo->num_lun = src->num_lun; + geo->all_luns = geo->num_ch * geo->num_lun; - geo->nr_chks = le16_to_cpu(src->num_chk); + geo->num_chk = le16_to_cpu(src->num_chk); geo->csecs = le16_to_cpu(src->csecs); geo->sos = le16_to_cpu(src->sos); @@ -316,7 +316,7 @@ static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, sec_per_pl = sec_per_pg * src->num_pln; geo->clba = sec_per_pl * pg_per_blk; - geo->all_chunks = geo->all_luns * geo->nr_chks; + geo->all_chunks = geo->all_luns * geo->num_chk; geo->total_secs = geo->clba * geo->all_chunks; geo->ws_min = sec_per_pg; @@ -327,8 +327,8 @@ static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, * unspecified in 1.2. Users of 1.2 must be aware of this and eventually * specify these values through a quirk if restrictions apply. 
*/ - geo->maxoc = geo->all_luns * geo->nr_chks; - geo->maxocpu = geo->nr_chks; + geo->maxoc = geo->all_luns * geo->num_chk; + geo->maxocpu = geo->num_chk; geo->mccap = le32_to_cpu(src->mccap); @@ -350,13 +350,13 @@ static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id, geo->cpar = le16_to_cpu(src->cpar); geo->mpos = le32_to_cpu(src->mpos); - geo->plane_mode = NVM_PLANE_SINGLE; + geo->pln_mode = NVM_PLANE_SINGLE; if (geo->mpos & 0x020202) { - geo->plane_mode = NVM_PLANE_DOUBLE; + geo->pln_mode = NVM_PLANE_DOUBLE; geo->ws_opt <<= 1; } else if (geo->mpos & 0x040404) { - geo->plane_mode = NVM_PLANE_QUAD; + geo->pln_mode = NVM_PLANE_QUAD; geo->ws_opt <<= 2; } @@ -403,14 +403,14 @@ static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id, return -EINVAL; } - geo->nr_chnls = le16_to_cpu(id->num_grp); - geo->nr_luns = le16_to_cpu(id->num_pu); - geo->all_luns = geo->nr_chnls * geo->nr_luns; + geo->num_ch = le16_to_cpu(id->num_grp); + geo->num_lun = le16_to_cpu(id->num_pu); + geo->all_luns = geo->num_ch * geo->num_lun; - geo->nr_chks = le32_to_cpu(id->num_chk); + geo->num_chk = le32_to_cpu(id->num_chk); geo->clba = le32_to_cpu(id->clba); - geo->all_chunks = geo->all_luns * geo->nr_chks; + geo->all_chunks = geo->all_luns * geo->num_chk; geo->total_secs = geo->clba * geo->all_chunks; geo->ws_min = le32_to_cpu(id->ws_min); @@ -484,7 +484,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, struct nvme_ctrl *ctrl = ns->ctrl; struct nvme_nvm_command c = {}; struct nvme_nvm_bb_tbl *bb_tbl; - int nr_blks = geo->nr_chks * geo->num_pln; + int nr_blks = geo->num_chk * geo->num_pln; int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks; int ret = 0; @@ -525,7 +525,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, goto out; } - memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->num_pln); + memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln); out: kfree(bb_tbl); return ret; @@ -968,7 +968,7 @@ static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page) ppaf->pln_offset, ppaf->pln_len, ppaf->blk_offset, ppaf->blk_len, ppaf->pg_offset, ppaf->pg_len, - ppaf->sect_offset, ppaf->sect_len); + ppaf->sec_offset, ppaf->sec_len); } static ssize_t nvm_dev_attr_show_12(struct device *dev, @@ -998,13 +998,13 @@ static ssize_t nvm_dev_attr_show_12(struct device *dev, } else if (strcmp(attr->name, "flash_media_type") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype); } else if (strcmp(attr->name, "num_channels") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chnls); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch); } else if (strcmp(attr->name, "num_luns") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_luns); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun); } else if (strcmp(attr->name, "num_planes") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln); } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */ - return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chks); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk); } else if (strcmp(attr->name, "num_pages") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg); } else if (strcmp(attr->name, "page_size") == 0) { @@ -1048,11 +1048,11 @@ static ssize_t nvm_dev_attr_show_20(struct device *dev, attr = &dattr->attr; if (strcmp(attr->name, "groups") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chnls); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch); } else if (strcmp(attr->name, "punits") == 0) { - 
return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_luns); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun); } else if (strcmp(attr->name, "chunks") == 0) { - return scnprintf(page, PAGE_SIZE, "%u\n", geo->nr_chks); + return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk); } else if (strcmp(attr->name, "clba") == 0) { return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba); } else if (strcmp(attr->name, "ws_min") == 0) { diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 870959a58fef..00295d9f9522 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -163,14 +163,14 @@ struct nvm_addrf_12 { u8 blk_len; u8 pg_len; u8 pln_len; - u8 sect_len; + u8 sec_len; u8 ch_offset; u8 lun_offset; u8 blk_offset; u8 pg_offset; u8 pln_offset; - u8 sect_offset; + u8 sec_offset; u64 ch_mask; u64 lun_mask; @@ -275,8 +275,8 @@ struct nvm_geo { u8 version; /* instance specific geometry */ - int nr_chnls; - int nr_luns; /* per channel */ + int num_ch; + int num_lun; /* per channel */ /* calculated values */ int all_luns; /* across channels */ @@ -287,7 +287,7 @@ struct nvm_geo { sector_t total_secs; /* across channels */ /* chunk geometry */ - u32 nr_chks; /* chunks per lun */ + u32 num_chk; /* chunks per lun */ u32 clba; /* sectors per chunk */ u16 csecs; /* sector size */ u16 sos; /* out-of-band area size */ @@ -325,7 +325,7 @@ struct nvm_geo { u32 mpos; u8 num_pln; - u8 plane_mode; + u8 pln_mode; u16 num_pg; u16 fpg_sz; }; @@ -382,7 +382,7 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; - l.ppa |= ((u64)r.g.sec) << ppaf->sect_offset; + l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; return l; } @@ -401,7 +401,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; - l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sect_offset; + l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; return l; } -- cgit v1.2.3 From 694715137482b10d5be83b1dadf9a3cdee2ce1bc Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:15 +0200 Subject: lightnvm: add support for 2.0 address format MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for 2.0 address format. Also, align address bits for 1.2 and 2.0 to be able to operate on channel and luns without requiring a format conversion. Use a generic address format for this purpose. Also, convert the generic operations to the generic format in pblk. 
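The practical effect is that code which only touches channels, LUNs, and blocks can use one view of struct ppa_addr for both spec revisions. A minimal sketch of the idea, mirroring the pblk_ppa_to_pos() conversion in the diff below (ppa_pos() itself is a hypothetical name):

	/* Sketch only: the generic 'a' view works for 1.2 and 2.0 because
	 * the ch/lun/blk bits are aligned across both address formats. */
	static inline int ppa_pos(struct nvm_geo *geo, struct ppa_addr p)
	{
		return p.a.lun * geo->num_ch + p.a.ch;
	}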
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 20 ++++----- drivers/lightnvm/pblk-core.c | 10 ++--- drivers/lightnvm/pblk-map.c | 4 +- drivers/lightnvm/pblk-sysfs.c | 4 +- drivers/lightnvm/pblk.h | 4 +- include/linux/lightnvm.h | 101 +++++++++++++++++++++++++++++++----------- 6 files changed, 95 insertions(+), 48 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 94b3b423840b..63d948cc6dec 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -194,8 +194,8 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev, for (j = 0; j < luns_in_chnl; j++) { luns[lunid].ppa = 0; - luns[lunid].g.ch = i; - luns[lunid++].g.lun = j; + luns[lunid].a.ch = i; + luns[lunid++].a.lun = j; lun_offs[j] = blun; lun_roffs[j + blun] = blun; @@ -556,22 +556,22 @@ static void nvm_unregister_map(struct nvm_dev *dev) static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p) { struct nvm_dev_map *dev_map = tgt_dev->map; - struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch]; - int lun_off = ch_map->lun_offs[p->g.lun]; + struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch]; + int lun_off = ch_map->lun_offs[p->a.lun]; - p->g.ch += ch_map->ch_off; - p->g.lun += lun_off; + p->a.ch += ch_map->ch_off; + p->a.lun += lun_off; } static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p) { struct nvm_dev *dev = tgt_dev->parent; struct nvm_dev_map *dev_rmap = dev->rmap; - struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch]; - int lun_roff = ch_rmap->lun_offs[p->g.lun]; + struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch]; + int lun_roff = ch_rmap->lun_offs[p->a.lun]; - p->g.ch -= ch_rmap->ch_off; - p->g.lun -= lun_roff; + p->a.ch -= ch_rmap->ch_off; + p->a.lun -= lun_roff; } static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 64c87dd4f1cd..c3eb135fce07 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -885,7 +885,7 @@ int pblk_line_erase(struct pblk *pblk, struct pblk_line *line) } ppa = pblk->luns[bit].bppa; /* set ch and lun */ - ppa.g.blk = line->id; + ppa.a.blk = line->id; atomic_dec(&line->left_eblks); WARN_ON(test_and_set_bit(bit, line->erase_bitmap)); @@ -1683,8 +1683,8 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int i; for (i = 1; i < nr_ppas; i++) - WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun || - ppa_list[0].g.ch != ppa_list[i].g.ch); + WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun || + ppa_list[0].a.ch != ppa_list[i].a.ch); #endif ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000)); @@ -1728,8 +1728,8 @@ void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas) int i; for (i = 1; i < nr_ppas; i++) - WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun || - ppa_list[0].g.ch != ppa_list[i].g.ch); + WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun || + ppa_list[0].a.ch != ppa_list[i].a.ch); #endif rlun = &pblk->luns[pos]; diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c index 04e08d76ea5f..20dbaa89c9df 100644 --- a/drivers/lightnvm/pblk-map.c +++ b/drivers/lightnvm/pblk-map.c @@ -127,7 +127,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd, atomic_dec(&e_line->left_eblks); *erase_ppa = rqd->ppa_list[i]; - erase_ppa->g.blk = e_line->id; + erase_ppa->a.blk = e_line->id; spin_unlock(&e_line->lock); @@ -168,6 +168,6 @@ retry: set_bit(bit, 
e_line->erase_bitmap); atomic_dec(&e_line->left_eblks); *erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */ - erase_ppa->g.blk = e_line->id; + erase_ppa->a.blk = e_line->id; } } diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 3e9364f60b44..2489ea0edfa0 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -39,8 +39,8 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page) sz += snprintf(page + sz, PAGE_SIZE - sz, "pblk: pos:%d, ch:%d, lun:%d - %d\n", i, - rlun->bppa.g.ch, - rlun->bppa.g.lun, + rlun->bppa.a.ch, + rlun->bppa.a.lun, active); } diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index dcdad255ccb5..6607c41b23c0 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -936,12 +936,12 @@ static inline int pblk_pad_distance(struct pblk *pblk) static inline int pblk_ppa_to_line(struct ppa_addr p) { - return p.g.blk; + return p.a.blk; } static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p) { - return p.g.lun * geo->num_ch + p.g.ch; + return p.a.lun * geo->num_ch + p.a.ch; } static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 00295d9f9522..f2549b4b8626 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -16,12 +16,21 @@ enum { NVM_IOTYPE_GC = 1, }; -#define NVM_BLK_BITS (16) -#define NVM_PG_BITS (16) -#define NVM_SEC_BITS (8) -#define NVM_PL_BITS (8) -#define NVM_LUN_BITS (8) -#define NVM_CH_BITS (7) +/* common format */ +#define NVM_GEN_CH_BITS (8) +#define NVM_GEN_LUN_BITS (8) +#define NVM_GEN_BLK_BITS (16) +#define NVM_GEN_RESERVED (32) + +/* 1.2 format */ +#define NVM_12_PG_BITS (16) +#define NVM_12_PL_BITS (4) +#define NVM_12_SEC_BITS (4) +#define NVM_12_RESERVED (8) + +/* 2.0 format */ +#define NVM_20_SEC_BITS (24) +#define NVM_20_RESERVED (8) enum { NVM_OCSSD_SPEC_12 = 12, @@ -31,16 +40,34 @@ enum { struct ppa_addr { /* Generic structure for all addresses */ union { + /* generic device format */ struct { - u64 blk : NVM_BLK_BITS; - u64 pg : NVM_PG_BITS; - u64 sec : NVM_SEC_BITS; - u64 pl : NVM_PL_BITS; - u64 lun : NVM_LUN_BITS; - u64 ch : NVM_CH_BITS; - u64 reserved : 1; + u64 ch : NVM_GEN_CH_BITS; + u64 lun : NVM_GEN_LUN_BITS; + u64 blk : NVM_GEN_BLK_BITS; + u64 reserved : NVM_GEN_RESERVED; + } a; + + /* 1.2 device format */ + struct { + u64 ch : NVM_GEN_CH_BITS; + u64 lun : NVM_GEN_LUN_BITS; + u64 blk : NVM_GEN_BLK_BITS; + u64 pg : NVM_12_PG_BITS; + u64 pl : NVM_12_PL_BITS; + u64 sec : NVM_12_SEC_BITS; + u64 reserved : NVM_12_RESERVED; } g; + /* 2.0 device format */ + struct { + u64 grp : NVM_GEN_CH_BITS; + u64 pu : NVM_GEN_LUN_BITS; + u64 chk : NVM_GEN_BLK_BITS; + u64 sec : NVM_20_SEC_BITS; + u64 reserved : NVM_20_RESERVED; + } m; + struct { u64 line : 63; u64 is_cached : 1; @@ -374,15 +401,25 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, struct ppa_addr r) { struct nvm_geo *geo = &tgt_dev->geo; - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; struct ppa_addr l; - l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; - l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; - l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; - l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; - l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; - l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; + + l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; + 
l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; + l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; + l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; + l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; + l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &geo->addrf; + + l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; + l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; + l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; + l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; + } return l; } @@ -391,17 +428,27 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, struct ppa_addr r) { struct nvm_geo *geo = &tgt_dev->geo; - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; struct ppa_addr l; l.ppa = 0; - l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; - l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; - l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; - l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; - l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; - l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; + + l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; + l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; + l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; + l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; + l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; + l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &geo->addrf; + + l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; + l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; + l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; + l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; + } return l; } -- cgit v1.2.3 From 7100d50a7e58a6884368001e2b1a32b7169c072c Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:16 +0200 Subject: lightnvm: make address conversions depend on generic device MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On address conversions, use the generic device instead of the target device. This allows conversions to be used outside of the target's realm.
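For readers following along, the conversion helpers above boil down to mask-and-shift arithmetic over a packed 64-bit address. Below is a minimal standalone sketch of the same scheme; the field offsets and masks are invented for illustration and are not taken from any real device geometry:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the mask/offset packing used by generic_to_dev_addr() and
 * dev_to_generic_addr(). The layout below is hypothetical; real devices
 * report their format in the geometry. */
struct addrf {
	int ch_off, lun_off, chk_off, sec_off;
	uint64_t ch_mask, lun_mask, chk_mask, sec_mask;
};

static uint64_t pack(const struct addrf *f, uint64_t grp, uint64_t pu,
		     uint64_t chk, uint64_t sec)
{
	return (grp << f->ch_off) | (pu << f->lun_off) |
	       (chk << f->chk_off) | (sec << f->sec_off);
}

int main(void)
{
	/* hypothetical 2.0-style layout: sec:0-15, chk:16-31, pu:32-39, grp:40-47 */
	struct addrf f = {
		.sec_off = 0,  .sec_mask = 0xffffULL,
		.chk_off = 16, .chk_mask = 0xffffULL << 16,
		.lun_off = 32, .lun_mask = 0xffULL << 32,
		.ch_off = 40,  .ch_mask = 0xffULL << 40,
	};
	uint64_t ppa = pack(&f, 2, 1, 100, 7);

	/* unpack, mirroring dev_to_generic_addr() */
	printf("grp=%llu pu=%llu chk=%llu sec=%llu\n",
	       (unsigned long long)((ppa & f.ch_mask) >> f.ch_off),
	       (unsigned long long)((ppa & f.lun_mask) >> f.lun_off),
	       (unsigned long long)((ppa & f.chk_mask) >> f.chk_off),
	       (unsigned long long)((ppa & f.sec_mask) >> f.sec_off));
	return 0;
}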
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 4 ++-- include/linux/lightnvm.h | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 63d948cc6dec..77901bf17416 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -581,7 +581,7 @@ static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, for (i = 0; i < nr_ppas; i++) { nvm_map_to_dev(tgt_dev, &ppa_list[i]); - ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]); + ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]); } } @@ -591,7 +591,7 @@ static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, int i; for (i = 0; i < nr_ppas; i++) { - ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]); + ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]); nvm_map_to_tgt(tgt_dev, &ppa_list[i]); } } diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index f2549b4b8626..f3b273e543c3 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -397,10 +397,10 @@ struct nvm_dev { struct list_head targets; }; -static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, +static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, struct ppa_addr r) { - struct nvm_geo *geo = &tgt_dev->geo; + struct nvm_geo *geo = &dev->geo; struct ppa_addr l; if (geo->version == NVM_OCSSD_SPEC_12) { @@ -424,10 +424,10 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, return l; } -static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, +static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, struct ppa_addr r) { - struct nvm_geo *geo = &tgt_dev->geo; + struct nvm_geo *geo = &dev->geo; struct ppa_addr l; l.ppa = 0; -- cgit v1.2.3 From a294c199455187d124b0760fa8f86c13cdaa4b25 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:17 +0200 Subject: lightnvm: implement get log report chunk helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 2.0 spec provides a report chunk log page that can be retrieved using the standard nvme get log page. This replaces the dedicated get/put bad block table in 1.2. This patch implements the helper functions to allow targets to retrieve the chunk metadata using get log page. It makes nvme_get_log_ext available outside of nvme core so that we can use it from lightnvm.
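To make the indexing concrete, the helper below locates a chunk's 32-byte descriptor inside the report chunk log page by linearizing (group, parallel unit, chunk) into a log position. This is a standalone sketch of that arithmetic; the geometry numbers are invented example values, not read from a device:

#include <stdint.h>
#include <stdio.h>

/* Sketch: byte offset of one chunk descriptor in the 2.0 report chunk
 * log page, mirroring the log_pos computation in the helper. */
int main(void)
{
	const uint64_t num_lun = 4, num_chk = 1000, meta_sz = 32; /* example geometry */
	uint64_t grp = 1, pu = 2, chk = 10; /* address of the chunk of interest */

	/* chunks are laid out per group, then per parallel unit */
	uint64_t log_pos = chk + pu * num_chk + grp * num_lun * num_chk;

	printf("log offset = %llu bytes\n",
	       (unsigned long long)(log_pos * meta_sz));
	return 0;
}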
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 11 +++++++ drivers/nvme/host/core.c | 4 +-- drivers/nvme/host/lightnvm.c | 74 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/lightnvm.h | 24 ++++++++++++++ 4 files changed, 111 insertions(+), 2 deletions(-) diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 77901bf17416..63171cdce270 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -712,6 +712,17 @@ static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list); } +int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta, + struct ppa_addr ppa, int nchks) +{ + struct nvm_dev *dev = tgt_dev->parent; + + nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1); + + return dev->ops->get_chk_meta(tgt_dev->parent, meta, + (sector_t)ppa.ppa, nchks); +} +EXPORT_SYMBOL(nvm_get_chunk_meta); int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int nr_ppas, int type) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index e7ec2fb5c59a..f81e3b323366 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2219,8 +2219,8 @@ out_unlock: } int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, - u8 log_page, void *log, - size_t size, size_t offset) + u8 log_page, void *log, + size_t size, size_t offset) { struct nvme_command c = { }; unsigned long dwlen = size / 4 - 1; diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 08f0f6b5bc06..ffd64a83c8c3 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -35,6 +35,10 @@ enum nvme_nvm_admin_opcode { nvme_nvm_admin_set_bb_tbl = 0xf1, }; +enum nvme_nvm_log_page { + NVME_NVM_LOG_REPORT_CHUNK = 0xca, +}; + struct nvme_nvm_ph_rw { __u8 opcode; __u8 flags; @@ -236,6 +240,16 @@ struct nvme_nvm_id20 { __u8 vs[1024]; }; +struct nvme_nvm_chk_meta { + __u8 state; + __u8 type; + __u8 wi; + __u8 rsvd[5]; + __le64 slba; + __le64 cnlb; + __le64 wp; +}; + /* * Check we didn't inadvertently grow the command struct */ @@ -252,6 +266,9 @@ static inline void _nvme_nvm_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64); BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8); BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE); + BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32); + BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != + sizeof(struct nvm_chk_meta)); } static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst, @@ -552,6 +569,61 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas, return ret; } +/* + * Expect the lba in device format + */ +static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev, + struct nvm_chk_meta *meta, + sector_t slba, int nchks) +{ + struct nvm_geo *geo = &ndev->geo; + struct nvme_ns *ns = ndev->q->queuedata; + struct nvme_ctrl *ctrl = ns->ctrl; + struct nvme_nvm_chk_meta *dev_meta = (struct nvme_nvm_chk_meta *)meta; + struct ppa_addr ppa; + size_t left = nchks * sizeof(struct nvme_nvm_chk_meta); + size_t log_pos, offset, len; + int ret, i; + + /* Normalize lba address space to obtain log offset */ + ppa.ppa = slba; + ppa = dev_to_generic_addr(ndev, ppa); + + log_pos = ppa.m.chk; + log_pos += ppa.m.pu * geo->num_chk; + log_pos += ppa.m.grp * geo->num_lun * geo->num_chk; + + offset = log_pos * sizeof(struct nvme_nvm_chk_meta); + + while (left) { + len = min_t(unsigned int, left, 
ctrl->max_hw_sectors << 9); + + ret = nvme_get_log_ext(ctrl, ns, NVME_NVM_LOG_REPORT_CHUNK, + dev_meta, len, offset); + if (ret) { + dev_err(ctrl->device, "Get REPORT CHUNK log error\n"); + break; + } + + for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) { + meta->state = dev_meta->state; + meta->type = dev_meta->type; + meta->wi = dev_meta->wi; + meta->slba = le64_to_cpu(dev_meta->slba); + meta->cnlb = le64_to_cpu(dev_meta->cnlb); + meta->wp = le64_to_cpu(dev_meta->wp); + + meta++; + dev_meta++; + } + + offset += len; + left -= len; + } + + return ret; +} + static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns, struct nvme_nvm_command *c) { @@ -683,6 +755,8 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .get_bb_tbl = nvme_nvm_get_bb_tbl, .set_bb_tbl = nvme_nvm_set_bb_tbl, + .get_chk_meta = nvme_nvm_get_chk_meta, + .submit_io = nvme_nvm_submit_io, .submit_io_sync = nvme_nvm_submit_io_sync, diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index f3b273e543c3..da45efa09bb2 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -81,10 +81,13 @@ struct nvm_rq; struct nvm_id; struct nvm_dev; struct nvm_tgt_dev; +struct nvm_chk_meta; typedef int (nvm_id_fn)(struct nvm_dev *); typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); +typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *, + sector_t, int); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); @@ -98,6 +101,8 @@ struct nvm_dev_ops { nvm_op_bb_tbl_fn *get_bb_tbl; nvm_op_set_bb_fn *set_bb_tbl; + nvm_get_chk_meta_fn *get_chk_meta; + nvm_submit_io_fn *submit_io; nvm_submit_io_sync_fn *submit_io_sync; @@ -227,6 +232,20 @@ struct nvm_addrf { u64 rsv_mask[2]; }; +/* + * Note: The structure size is linked to nvme_nvm_chk_meta such that the same + * buffer can be used when converting from little endian to cpu addressing. + */ +struct nvm_chk_meta { + u8 state; + u8 type; + u8 wi; + u8 rsvd[5]; + u64 slba; + u64 cnlb; + u64 wp; +}; + struct nvm_target { struct list_head list; struct nvm_tgt_dev *dev; @@ -492,6 +511,11 @@ extern struct nvm_dev *nvm_alloc_dev(int); extern int nvm_register(struct nvm_dev *); extern void nvm_unregister(struct nvm_dev *); + +extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, + struct nvm_chk_meta *meta, struct ppa_addr ppa, + int nchks); + extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, int, int); extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); -- cgit v1.2.3 From 7ad5039ee557f0a427a0768e43c28d0236d56ba3 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:18 +0200 Subject: lightnvm: pblk: check for supported version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At this point, only the 1.2 spec is supported, so check for it. Also, since device-side L2P is only supported in the 1.2 spec, make sure to check its value only under 1.2.
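The gate itself follows a common init-time validation pattern: reject unsupported revisions and feature bits before any geometry-dependent setup runs. A compact standalone sketch with invented names, not the pblk code itself:

#include <stdio.h>

/* Sketch of the init-time gate; all identifiers here are stand-ins. */
enum { SPEC_12 = 12, SPEC_20 = 20 };
#define RSP_L2P (1 << 0)	/* device-side L2P capability bit */

static int check_version(int version, unsigned int dom)
{
	if (version != SPEC_12)
		return -1;	/* only 1.2 is supported at this point */
	if (dom & RSP_L2P)
		return -1;	/* device-side L2P is not supported */
	return 0;
}

int main(void)
{
	printf("1.2, no L2P: %d\n", check_version(SPEC_12, 0));
	printf("2.0: %d\n", check_version(SPEC_20, 0));
	return 0;
}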
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-init.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 4656d1ff81a6..34ff47705293 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -1016,9 +1016,15 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, struct pblk *pblk; int ret; - if (dev->geo.dom & NVM_RSP_L2P) { + if (geo->version != NVM_OCSSD_SPEC_12) { + pr_err("pblk: OCSSD version not supported (%u)\n", + geo->version); + return ERR_PTR(-EINVAL); + } + + if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) { pr_err("pblk: host-side L2P table not supported. (%x)\n", - dev->geo.dom); + geo->dom); return ERR_PTR(-EINVAL); } -- cgit v1.2.3 From bb845ae45c3d669ee814ce9f0ed51f2915ee55a0 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:19 +0200 Subject: lightnvm: pblk: rename ppaf* to addrf* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation for 2.0 support in pblk, rename variables referring to the address format to addrf and reserve ppaf for the 1.2 path. Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-init.c | 8 ++++---- drivers/lightnvm/pblk-sysfs.c | 4 ++-- drivers/lightnvm/pblk.h | 16 ++++++++-------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 34ff47705293..5b381700ef30 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -80,7 +80,7 @@ static size_t pblk_trans_map_size(struct pblk *pblk) { int entry_size = 8; - if (pblk->ppaf_bitsize < 32) + if (pblk->addrf_len < 32) entry_size = 4; return entry_size * pblk->rl.nr_secs; @@ -229,7 +229,7 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst) return dst->blk_offset + src->blk_len; } -static int pblk_set_ppaf(struct pblk *pblk) +static int pblk_set_addrf(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; @@ -241,7 +241,7 @@ static int pblk_set_ppaf(struct pblk *pblk) return -EINVAL; } - pblk->ppaf_bitsize = pblk_set_addrf_12(geo, (void *)&pblk->ppaf); + pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf); return 0; } @@ -377,7 +377,7 @@ static int pblk_core_init(struct pblk *pblk) if (!pblk->r_end_wq) goto free_bb_wq; - if (pblk_set_ppaf(pblk)) + if (pblk_set_addrf(pblk)) goto free_r_end_wq; INIT_LIST_HEAD(&pblk->compl_list); diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 2489ea0edfa0..fd2caad39d49 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -117,12 +117,12 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) struct nvm_addrf_12 *geo_ppaf; ssize_t sz = 0; - ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + ppaf = (struct nvm_addrf_12 *)&pblk->addrf; geo_ppaf = (struct nvm_addrf_12 *)&geo->addrf; sz = snprintf(page, PAGE_SIZE, "g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", - pblk->ppaf_bitsize, + pblk->addrf_len, ppaf->blk_offset, ppaf->blk_len, ppaf->pg_offset, ppaf->pg_len, ppaf->lun_offset, ppaf->lun_len, diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 6607c41b23c0..40aee9e48af4 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -570,8 +570,8 @@ 
struct pblk { struct pblk_line_mgmt l_mg; /* Line management */ struct pblk_line_meta lm; /* Line metadata */ - struct nvm_addrf ppaf; - int ppaf_bitsize; + struct nvm_addrf addrf; + int addrf_len; struct pblk_rb rwb; @@ -947,7 +947,7 @@ static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p) static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, u64 line_id) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; struct ppa_addr ppa; ppa.ppa = 0; @@ -964,7 +964,7 @@ static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk, struct ppa_addr p) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; u64 paddr; paddr = (u64)p.g.ch << ppaf->ch_offset; @@ -988,7 +988,7 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32) ppa64.c.line = ppa32 & ((~0U) >> 1); ppa64.c.is_cached = 1; } else { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> ppaf->ch_offset; ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> ppaf->lun_offset; @@ -1011,7 +1011,7 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64) ppa32 |= ppa64.c.line; ppa32 |= 1U << 31; } else { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->ppaf; + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; ppa32 |= ppa64.g.ch << ppaf->ch_offset; ppa32 |= ppa64.g.lun << ppaf->lun_offset; @@ -1029,7 +1029,7 @@ static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk, { struct ppa_addr ppa; - if (pblk->ppaf_bitsize < 32) { + if (pblk->addrf_len < 32) { u32 *map = (u32 *)pblk->trans_map; ppa = pblk_ppa32_to_ppa64(pblk, map[lba]); @@ -1045,7 +1045,7 @@ static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk, static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba, struct ppa_addr ppa) { - if (pblk->ppaf_bitsize < 32) { + if (pblk->addrf_len < 32) { u32 *map = (u32 *)pblk->trans_map; map[lba] = pblk_ppa64_to_ppa32(pblk, ppa); -- cgit v1.2.3 From 32ef9412c1142c64b372b83d3740f234f4226317 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:20 +0200 Subject: lightnvm: pblk: implement get log report chunk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation for pblk supporting 2.0, implement the get log report chunk in pblk. Also, define the chunk states as given in the 2.0 spec.
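A quick standalone illustration of the chunk-state flags and the "does this chunk need an erase before use?" decision that the pblk changes below rely on; this is a sketch, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* 2.0-style chunk states as bit flags (illustrative values) */
enum {
	CHK_ST_FREE	= 1 << 0,	/* erased, ready for writes */
	CHK_ST_CLOSED	= 1 << 1,	/* fully written */
	CHK_ST_OPEN	= 1 << 2,	/* partially written */
	CHK_ST_OFFLINE	= 1 << 3,	/* unusable */
};

static bool chunk_needs_erase(int state)
{
	/* free chunks are already erased; offline chunks are skipped */
	return !(state & (CHK_ST_FREE | CHK_ST_OFFLINE));
}

int main(void)
{
	printf("closed: %d, free: %d\n",
	       chunk_needs_erase(CHK_ST_CLOSED),
	       chunk_needs_erase(CHK_ST_FREE));
	return 0;
}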
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-core.c | 138 +++++++++++++++++++++++---- drivers/lightnvm/pblk-init.c | 222 ++++++++++++++++++++++++++++++------------- drivers/lightnvm/pblk.h | 7 ++ include/linux/lightnvm.h | 13 +++ 4 files changed, 298 insertions(+), 82 deletions(-) diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index c3eb135fce07..94d5d97c9d8a 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -44,11 +44,12 @@ static void pblk_line_mark_bb(struct work_struct *work) } static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line, - struct ppa_addr *ppa) + struct ppa_addr ppa_addr) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - int pos = pblk_ppa_to_pos(geo, *ppa); + struct ppa_addr *ppa; + int pos = pblk_ppa_to_pos(geo, ppa_addr); pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos); atomic_long_inc(&pblk->erase_failed); @@ -58,26 +59,38 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line, pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n", line->id, pos); + /* Not necessary to mark bad blocks on 2.0 spec. */ + if (geo->version == NVM_OCSSD_SPEC_20) + return; + + ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC); + if (!ppa) + return; + + *ppa = ppa_addr; pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, GFP_ATOMIC, pblk->bb_wq); } static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd) { + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + struct nvm_chk_meta *chunk; struct pblk_line *line; + int pos; line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)]; + pos = pblk_ppa_to_pos(geo, rqd->ppa_addr); + chunk = &line->chks[pos]; + atomic_dec(&line->left_seblks); if (rqd->error) { - struct ppa_addr *ppa; - - ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC); - if (!ppa) - return; - - *ppa = rqd->ppa_addr; - pblk_mark_bb(pblk, line, ppa); + chunk->state = NVM_CHK_ST_OFFLINE; + pblk_mark_bb(pblk, line, rqd->ppa_addr); + } else { + chunk->state = NVM_CHK_ST_FREE; } atomic_dec(&pblk->inflight_io); @@ -92,6 +105,49 @@ static void pblk_end_io_erase(struct nvm_rq *rqd) mempool_free(rqd, pblk->e_rq_pool); } +/* + * Get information for all chunks from the device. 
+ * + * The caller is responsible for freeing the returned structure + */ +struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + struct nvm_chk_meta *meta; + struct ppa_addr ppa; + unsigned long len; + int ret; + + ppa.ppa = 0; + + len = geo->all_chunks * sizeof(*meta); + meta = kzalloc(len, GFP_KERNEL); + if (!meta) + return ERR_PTR(-ENOMEM); + + ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks); + if (ret) { + kfree(meta); + return ERR_PTR(-EIO); + } + + return meta; +} + +struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk, + struct nvm_chk_meta *meta, + struct ppa_addr ppa) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun; + int lun_off = ppa.m.pu * geo->num_chk; + int chk_off = ppa.m.chk; + + return meta + ch_off + lun_off + chk_off; +} + void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line, u64 paddr) { @@ -1091,10 +1147,34 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, return 1; } +static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line) +{ + struct pblk_line_meta *lm = &pblk->lm; + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + int blk_to_erase = atomic_read(&line->blk_in_line); + int i; + + for (i = 0; i < lm->blk_per_line; i++) { + struct pblk_lun *rlun = &pblk->luns[i]; + int pos = pblk_ppa_to_pos(geo, rlun->bppa); + int state = line->chks[pos].state; + + /* Free chunks should not be erased */ + if (state & NVM_CHK_ST_FREE) { + set_bit(pblk_ppa_to_pos(geo, rlun->bppa), + line->erase_bitmap); + blk_to_erase--; + } + } + + return blk_to_erase; +} + static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line) { struct pblk_line_meta *lm = &pblk->lm; - int blk_in_line = atomic_read(&line->blk_in_line); + int blk_to_erase; line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC); if (!line->map_bitmap) @@ -1107,7 +1187,21 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line) return -ENOMEM; } + /* Bad blocks do not need to be erased */ + bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line); + spin_lock(&line->lock); + + /* If we have not written to this line, we need to mark up free chunks + * as already erased + */ + if (line->state == PBLK_LINESTATE_NEW) { + blk_to_erase = pblk_prepare_new_line(pblk, line); + line->state = PBLK_LINESTATE_FREE; + } else { + blk_to_erase = atomic_read(&line->blk_in_line); + } + if (line->state != PBLK_LINESTATE_FREE) { kfree(line->map_bitmap); kfree(line->invalid_bitmap); @@ -1119,15 +1213,12 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line) line->state = PBLK_LINESTATE_OPEN; - atomic_set(&line->left_eblks, blk_in_line); - atomic_set(&line->left_seblks, blk_in_line); + atomic_set(&line->left_eblks, blk_to_erase); + atomic_set(&line->left_seblks, blk_to_erase); line->meta_distance = lm->meta_distance; spin_unlock(&line->lock); - /* Bad blocks do not need to be erased */ - bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line); - kref_init(&line->ref); return 0; @@ -1583,12 +1674,14 @@ static void pblk_line_should_sync_meta(struct pblk *pblk) void pblk_line_close(struct pblk *pblk, struct pblk_line *line) { + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + struct pblk_line_meta *lm = &pblk->lm; struct pblk_line_mgmt *l_mg = &pblk->l_mg; struct list_head *move_list; + int i; 
#ifdef CONFIG_NVM_DEBUG - struct pblk_line_meta *lm = &pblk->lm; - WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line), "pblk: corrupt closed line %d\n", line->id); #endif @@ -1610,6 +1703,15 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line) line->smeta = NULL; line->emeta = NULL; + for (i = 0; i < lm->blk_per_line; i++) { + struct pblk_lun *rlun = &pblk->luns[i]; + int pos = pblk_ppa_to_pos(geo, rlun->bppa); + int state = line->chks[pos].state; + + if (!(state & NVM_CHK_ST_OFFLINE)) + state = NVM_CHK_ST_CLOSED; + } + spin_unlock(&line->lock); spin_unlock(&l_mg->gc_lock); } diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 5b381700ef30..27b4974930b4 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -451,6 +451,7 @@ static void pblk_line_meta_free(struct pblk_line *line) { kfree(line->blk_bitmap); kfree(line->erase_bitmap); + kfree(line->chks); } static void pblk_lines_free(struct pblk *pblk) @@ -495,55 +496,44 @@ static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun, return 0; } -static void *pblk_bb_get_log(struct pblk *pblk) +static void *pblk_bb_get_meta(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - u8 *log; + u8 *meta; int i, nr_blks, blk_per_lun; int ret; blk_per_lun = geo->num_chk * geo->pln_mode; nr_blks = blk_per_lun * geo->all_luns; - log = kmalloc(nr_blks, GFP_KERNEL); - if (!log) + meta = kmalloc(nr_blks, GFP_KERNEL); + if (!meta) return ERR_PTR(-ENOMEM); for (i = 0; i < geo->all_luns; i++) { struct pblk_lun *rlun = &pblk->luns[i]; - u8 *log_pos = log + i * blk_per_lun; + u8 *meta_pos = meta + i * blk_per_lun; - ret = pblk_bb_get_tbl(dev, rlun, log_pos, blk_per_lun); + ret = pblk_bb_get_tbl(dev, rlun, meta_pos, blk_per_lun); if (ret) { - kfree(log); + kfree(meta); return ERR_PTR(-EIO); } } - return log; + return meta; } -static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line, - u8 *bb_log, int blk_per_line) +static void *pblk_chunk_get_meta(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - int i, bb_cnt = 0; - int blk_per_lun = geo->num_chk * geo->pln_mode; - for (i = 0; i < blk_per_line; i++) { - struct pblk_lun *rlun = &pblk->luns[i]; - u8 *lun_bb_log = bb_log + i * blk_per_lun; - - if (lun_bb_log[line->id] == NVM_BLK_T_FREE) - continue; - - set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap); - bb_cnt++; - } - - return bb_cnt; + if (geo->version == NVM_OCSSD_SPEC_12) + return pblk_bb_get_meta(pblk); + else + return pblk_chunk_get_info(pblk); } static int pblk_luns_init(struct pblk *pblk) @@ -644,8 +634,131 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks) atomic_set(&pblk->rl.free_user_blocks, nr_free_blks); } -static int pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line, - void *chunk_log, long *nr_bad_blks) +static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line, + void *chunk_meta) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + struct pblk_line_meta *lm = &pblk->lm; + int i, chk_per_lun, nr_bad_chks = 0; + + chk_per_lun = geo->num_chk * geo->pln_mode; + + for (i = 0; i < lm->blk_per_line; i++) { + struct pblk_lun *rlun = &pblk->luns[i]; + struct nvm_chk_meta *chunk; + int pos = pblk_ppa_to_pos(geo, rlun->bppa); + u8 *lun_bb_meta = chunk_meta + pos * chk_per_lun; + + chunk = &line->chks[pos]; + + /* + * In 1.2 spec. chunk state is not persisted by the device. 
Thus + * some of the values are reset each time pblk is instantiated. + */ + if (lun_bb_meta[line->id] == NVM_BLK_T_FREE) + chunk->state = NVM_CHK_ST_FREE; + else + chunk->state = NVM_CHK_ST_OFFLINE; + + chunk->type = NVM_CHK_TP_W_SEQ; + chunk->wi = 0; + chunk->slba = -1; + chunk->cnlb = geo->clba; + chunk->wp = 0; + + if (!(chunk->state & NVM_CHK_ST_OFFLINE)) + continue; + + set_bit(pos, line->blk_bitmap); + nr_bad_chks++; + } + + return nr_bad_chks; +} + +static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line, + struct nvm_chk_meta *meta) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + struct pblk_line_meta *lm = &pblk->lm; + int i, nr_bad_chks = 0; + + for (i = 0; i < lm->blk_per_line; i++) { + struct pblk_lun *rlun = &pblk->luns[i]; + struct nvm_chk_meta *chunk; + struct nvm_chk_meta *chunk_meta; + struct ppa_addr ppa; + int pos; + + ppa = rlun->bppa; + pos = pblk_ppa_to_pos(geo, ppa); + chunk = &line->chks[pos]; + + ppa.m.chk = line->id; + chunk_meta = pblk_chunk_get_off(pblk, meta, ppa); + + chunk->state = chunk_meta->state; + chunk->type = chunk_meta->type; + chunk->wi = chunk_meta->wi; + chunk->slba = chunk_meta->slba; + chunk->cnlb = chunk_meta->cnlb; + chunk->wp = chunk_meta->wp; + + if (!(chunk->state & NVM_CHK_ST_OFFLINE)) + continue; + + if (chunk->type & NVM_CHK_TP_SZ_SPEC) { + WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n"); + continue; + } + + set_bit(pos, line->blk_bitmap); + nr_bad_chks++; + } + + return nr_bad_chks; +} + +static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line, + void *chunk_meta, int line_id) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + struct pblk_line_mgmt *l_mg = &pblk->l_mg; + struct pblk_line_meta *lm = &pblk->lm; + long nr_bad_chks, chk_in_line; + + line->pblk = pblk; + line->id = line_id; + line->type = PBLK_LINETYPE_FREE; + line->state = PBLK_LINESTATE_NEW; + line->gc_group = PBLK_LINEGC_NONE; + line->vsc = &l_mg->vsc_list[line_id]; + spin_lock_init(&line->lock); + + if (geo->version == NVM_OCSSD_SPEC_12) + nr_bad_chks = pblk_setup_line_meta_12(pblk, line, chunk_meta); + else + nr_bad_chks = pblk_setup_line_meta_20(pblk, line, chunk_meta); + + chk_in_line = lm->blk_per_line - nr_bad_chks; + if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line || + chk_in_line < lm->min_blk_line) { + line->state = PBLK_LINESTATE_BAD; + list_add_tail(&line->list, &l_mg->bad_list); + return 0; + } + + atomic_set(&line->blk_in_line, chk_in_line); + list_add_tail(&line->list, &l_mg->free_list); + l_mg->nr_free_lines++; + + return chk_in_line; +} + +static int pblk_alloc_line_meta(struct pblk *pblk, struct pblk_line *line) { struct pblk_line_meta *lm = &pblk->lm; @@ -659,7 +772,13 @@ static int pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line, return -ENOMEM; } - *nr_bad_blks = pblk_bb_line(pblk, line, chunk_log, lm->blk_per_line); + line->chks = kmalloc(lm->blk_per_line * sizeof(struct nvm_chk_meta), + GFP_KERNEL); + if (!line->chks) { + kfree(line->erase_bitmap); + kfree(line->blk_bitmap); + return -ENOMEM; + } return 0; } @@ -846,10 +965,9 @@ add_emeta_page: static int pblk_lines_init(struct pblk *pblk) { struct pblk_line_mgmt *l_mg = &pblk->l_mg; - struct pblk_line_meta *lm = &pblk->lm; struct pblk_line *line; - void *chunk_log; - long nr_bad_blks = 0, nr_free_blks = 0; + void *chunk_meta; + long nr_free_chks = 0; int i, ret; ret = pblk_line_meta_init(pblk); @@ -864,11 +982,9 @@ static int pblk_lines_init(struct pblk *pblk) if (ret) goto 
fail_free_meta; - chunk_log = pblk_bb_get_log(pblk); - if (IS_ERR(chunk_log)) { - pr_err("pblk: could not get bad block log (%lu)\n", - PTR_ERR(chunk_log)); - ret = PTR_ERR(chunk_log); + chunk_meta = pblk_chunk_get_meta(pblk); + if (IS_ERR(chunk_meta)) { + ret = PTR_ERR(chunk_meta); goto fail_free_luns; } @@ -876,52 +992,30 @@ static int pblk_lines_init(struct pblk *pblk) GFP_KERNEL); if (!pblk->lines) { ret = -ENOMEM; - goto fail_free_chunk_log; + goto fail_free_chunk_meta; } for (i = 0; i < l_mg->nr_lines; i++) { - int chk_in_line; - line = &pblk->lines[i]; - line->pblk = pblk; - line->id = i; - line->type = PBLK_LINETYPE_FREE; - line->state = PBLK_LINESTATE_FREE; - line->gc_group = PBLK_LINEGC_NONE; - line->vsc = &l_mg->vsc_list[i]; - spin_lock_init(&line->lock); - - ret = pblk_setup_line_meta(pblk, line, chunk_log, &nr_bad_blks); + ret = pblk_alloc_line_meta(pblk, line); if (ret) goto fail_free_lines; - chk_in_line = lm->blk_per_line - nr_bad_blks; - if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line || - chk_in_line < lm->min_blk_line) { - line->state = PBLK_LINESTATE_BAD; - list_add_tail(&line->list, &l_mg->bad_list); - continue; - } - - nr_free_blks += chk_in_line; - atomic_set(&line->blk_in_line, chk_in_line); - - l_mg->nr_free_lines++; - list_add_tail(&line->list, &l_mg->free_list); + nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i); } - pblk_set_provision(pblk, nr_free_blks); + pblk_set_provision(pblk, nr_free_chks); - kfree(chunk_log); + kfree(chunk_meta); return 0; fail_free_lines: while (--i >= 0) pblk_line_meta_free(&pblk->lines[i]); kfree(pblk->lines); -fail_free_chunk_log: - kfree(chunk_log); +fail_free_chunk_meta: + kfree(chunk_meta); fail_free_luns: kfree(pblk->luns); fail_free_meta: diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 40aee9e48af4..39e47e3d6f23 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -297,6 +297,7 @@ enum { PBLK_LINETYPE_DATA = 2, /* Line state */ + PBLK_LINESTATE_NEW = 9, PBLK_LINESTATE_FREE = 10, PBLK_LINESTATE_OPEN = 11, PBLK_LINESTATE_CLOSED = 12, @@ -426,6 +427,8 @@ struct pblk_line { unsigned long *lun_bitmap; /* Bitmap for LUNs mapped in line */ + struct nvm_chk_meta *chks; /* Chunks forming line */ + struct pblk_smeta *smeta; /* Start metadata */ struct pblk_emeta *emeta; /* End medatada */ @@ -729,6 +732,10 @@ void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write); int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd, struct pblk_c_ctx *c_ctx); void pblk_discard(struct pblk *pblk, struct bio *bio); +struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk); +struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk, + struct nvm_chk_meta *lp, + struct ppa_addr ppa); void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd); void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd); int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index da45efa09bb2..6e0859b9d4d2 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -232,6 +232,19 @@ struct nvm_addrf { u64 rsv_mask[2]; }; +enum { + /* Chunk states */ + NVM_CHK_ST_FREE = 1 << 0, + NVM_CHK_ST_CLOSED = 1 << 1, + NVM_CHK_ST_OPEN = 1 << 2, + NVM_CHK_ST_OFFLINE = 1 << 3, + + /* Chunk types */ + NVM_CHK_TP_W_SEQ = 1 << 0, + NVM_CHK_TP_W_RAN = 1 << 1, + NVM_CHK_TP_SZ_SPEC = 1 << 4, +}; + /* * Note: The structure size is linked to nvme_nvm_chk_meta such that the same * buffer can be used when converting from little 
endian to cpu addressing. -- cgit v1.2.3 From 3b2a3ad11946b62a17dd6c24fba6aecf7fdbfc44 Mon Sep 17 00:00:00 2001 From: Javier González Date: Fri, 30 Mar 2018 00:05:21 +0200 Subject: lightnvm: pblk: implement 2.0 support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement 2.0 support in pblk. This includes the address formatting and mapping paths, as well as the sysfs entries for them. Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-init.c | 56 ++++++++++-- drivers/lightnvm/pblk-sysfs.c | 46 +++++++--- drivers/lightnvm/pblk.h | 196 ++++++++++++++++++++++++++++++++---------- 3 files changed, 234 insertions(+), 64 deletions(-) diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 27b4974930b4..91a5bc2556a3 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -229,20 +229,62 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst) return dst->blk_offset + src->blk_len; } +static int pblk_set_addrf_20(struct nvm_geo *geo, struct nvm_addrf *adst, + struct pblk_addrf *udst) +{ + struct nvm_addrf *src = &geo->addrf; + + adst->ch_len = get_count_order(geo->num_ch); + adst->lun_len = get_count_order(geo->num_lun); + adst->chk_len = src->chk_len; + adst->sec_len = src->sec_len; + + adst->sec_offset = 0; + adst->ch_offset = adst->sec_len; + adst->lun_offset = adst->ch_offset + adst->ch_len; + adst->chk_offset = adst->lun_offset + adst->lun_len; + + adst->sec_mask = ((1ULL << adst->sec_len) - 1) << adst->sec_offset; + adst->chk_mask = ((1ULL << adst->chk_len) - 1) << adst->chk_offset; + adst->lun_mask = ((1ULL << adst->lun_len) - 1) << adst->lun_offset; + adst->ch_mask = ((1ULL << adst->ch_len) - 1) << adst->ch_offset; + + udst->sec_stripe = geo->ws_opt; + udst->ch_stripe = geo->num_ch; + udst->lun_stripe = geo->num_lun; + + udst->sec_lun_stripe = udst->sec_stripe * udst->ch_stripe; + udst->sec_ws_stripe = udst->sec_lun_stripe * udst->lun_stripe; + + return adst->chk_offset + adst->chk_len; +} + static int pblk_set_addrf(struct pblk *pblk) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; int mod; - div_u64_rem(geo->clba, pblk->min_write_pgs, &mod); - if (mod) { - pr_err("pblk: bad configuration of sectors/pages\n"); + switch (geo->version) { + case NVM_OCSSD_SPEC_12: + div_u64_rem(geo->clba, pblk->min_write_pgs, &mod); + if (mod) { + pr_err("pblk: bad configuration of sectors/pages\n"); + return -EINVAL; + } + + pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf); + break; + case NVM_OCSSD_SPEC_20: + pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf, + &pblk->uaddrf); + break; + default: + pr_err("pblk: OCSSD revision not supported (%d)\n", + geo->version); return -EINVAL; } - pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf); - return 0; } @@ -1110,7 +1152,9 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, struct pblk *pblk; int ret; - if (geo->version != NVM_OCSSD_SPEC_12) { + /* pblk supports 1.2 and 2.0 versions */ + if (!(geo->version == NVM_OCSSD_SPEC_12 || + geo->version == NVM_OCSSD_SPEC_20)) { pr_err("pblk: OCSSD version not supported (%u)\n", geo->version); return ERR_PTR(-EINVAL); diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index fd2caad39d49..e61909af23a5 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -113,15 +113,14 @@ static ssize_t pblk_sysfs_ppaf(struct 
pblk *pblk, char *page) { struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - struct nvm_addrf_12 *ppaf; - struct nvm_addrf_12 *geo_ppaf; ssize_t sz = 0; - ppaf = (struct nvm_addrf_12 *)&pblk->addrf; - geo_ppaf = (struct nvm_addrf_12 *)&geo->addrf; + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; + struct nvm_addrf_12 *gppaf = (struct nvm_addrf_12 *)&geo->addrf; - sz = snprintf(page, PAGE_SIZE, - "g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", + sz = snprintf(page, PAGE_SIZE, + "g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", pblk->addrf_len, ppaf->blk_offset, ppaf->blk_len, ppaf->pg_offset, ppaf->pg_len, @@ -130,14 +129,33 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page) ppaf->pln_offset, ppaf->pln_len, ppaf->sec_offset, ppaf->sec_len); - sz += snprintf(page + sz, PAGE_SIZE - sz, - "d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", - geo_ppaf->blk_offset, geo_ppaf->blk_len, - geo_ppaf->pg_offset, geo_ppaf->pg_len, - geo_ppaf->lun_offset, geo_ppaf->lun_len, - geo_ppaf->ch_offset, geo_ppaf->ch_len, - geo_ppaf->pln_offset, geo_ppaf->pln_len, - geo_ppaf->sec_offset, geo_ppaf->sec_len); + sz += snprintf(page + sz, PAGE_SIZE - sz, + "d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n", + gppaf->blk_offset, gppaf->blk_len, + gppaf->pg_offset, gppaf->pg_len, + gppaf->lun_offset, gppaf->lun_len, + gppaf->ch_offset, gppaf->ch_len, + gppaf->pln_offset, gppaf->pln_len, + gppaf->sec_offset, gppaf->sec_len); + } else { + struct nvm_addrf *ppaf = &pblk->addrf; + struct nvm_addrf *gppaf = &geo->addrf; + + sz = snprintf(page, PAGE_SIZE, + "pblk:(s:%d)ch:%d/%d,lun:%d/%d,chk:%d/%d/sec:%d/%d\n", + pblk->addrf_len, + ppaf->ch_offset, ppaf->ch_len, + ppaf->lun_offset, ppaf->lun_len, + ppaf->chk_offset, ppaf->chk_len, + ppaf->sec_offset, ppaf->sec_len); + + sz += snprintf(page + sz, PAGE_SIZE - sz, + "device:ch:%d/%d,lun:%d/%d,chk:%d/%d,sec:%d/%d\n", + gppaf->ch_offset, gppaf->ch_len, + gppaf->lun_offset, gppaf->lun_len, + gppaf->chk_offset, gppaf->chk_len, + gppaf->sec_offset, gppaf->sec_len); + } return sz; } diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 39e47e3d6f23..9c682acfc5d1 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -561,6 +561,18 @@ enum { PBLK_STATE_STOPPED = 3, }; +/* Internal format to support not power-of-2 device formats */ +struct pblk_addrf { + /* gen to dev */ + int sec_stripe; + int ch_stripe; + int lun_stripe; + + /* dev to gen */ + int sec_lun_stripe; + int sec_ws_stripe; +}; + struct pblk { struct nvm_tgt_dev *dev; struct gendisk *disk; @@ -573,7 +585,8 @@ struct pblk { struct pblk_line_mgmt l_mg; /* Line management */ struct pblk_line_meta lm; /* Line metadata */ - struct nvm_addrf addrf; + struct nvm_addrf addrf; /* Aligned address format */ + struct pblk_addrf uaddrf; /* Unaligned address format */ int addrf_len; struct pblk_rb rwb; @@ -954,16 +967,39 @@ static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p) static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, u64 line_id) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; struct ppa_addr ppa; - ppa.ppa = 0; - ppa.g.blk = line_id; - ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset; - ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset; - ppa.g.ch = (paddr & ppaf->ch_mask) >> 
ppaf->ch_offset; - ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset; - ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset; + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; + + ppa.ppa = 0; + ppa.g.blk = line_id; + ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset; + ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset; + ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset; + ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset; + ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset; + } else { + struct pblk_addrf *uaddrf = &pblk->uaddrf; + int secs, chnls, luns; + + ppa.ppa = 0; + + ppa.m.chk = line_id; + + paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs); + ppa.m.sec = secs; + + paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls); + ppa.m.grp = chnls; + + paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns); + ppa.m.pu = luns; + + ppa.m.sec += uaddrf->sec_stripe * paddr; + } return ppa; } @@ -971,14 +1007,30 @@ static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr, static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk, struct ppa_addr p) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; u64 paddr; - paddr = (u64)p.g.ch << ppaf->ch_offset; - paddr |= (u64)p.g.lun << ppaf->lun_offset; - paddr |= (u64)p.g.pg << ppaf->pg_offset; - paddr |= (u64)p.g.pl << ppaf->pln_offset; - paddr |= (u64)p.g.sec << ppaf->sec_offset; + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; + + paddr = (u64)p.g.ch << ppaf->ch_offset; + paddr |= (u64)p.g.lun << ppaf->lun_offset; + paddr |= (u64)p.g.pg << ppaf->pg_offset; + paddr |= (u64)p.g.pl << ppaf->pln_offset; + paddr |= (u64)p.g.sec << ppaf->sec_offset; + } else { + struct pblk_addrf *uaddrf = &pblk->uaddrf; + u64 secs = p.m.sec; + int sec_stripe; + + paddr = (u64)p.m.grp * uaddrf->sec_stripe; + paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe; + + secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe); + paddr += secs * uaddrf->sec_ws_stripe; + paddr += sec_stripe; + } return paddr; } @@ -995,14 +1047,37 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32) ppa64.c.line = ppa32 & ((~0U) >> 1); ppa64.c.is_cached = 1; } else { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; - - ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> ppaf->ch_offset; - ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> ppaf->lun_offset; - ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> ppaf->blk_offset; - ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> ppaf->pg_offset; - ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> ppaf->pln_offset; - ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sec_offset; + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = + (struct nvm_addrf_12 *)&pblk->addrf; + + ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> + ppaf->ch_offset; + ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> + ppaf->lun_offset; + ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> + ppaf->blk_offset; + ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> + ppaf->pg_offset; + ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> + ppaf->pln_offset; + ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> + ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &pblk->addrf; + + ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> + lbaf->ch_offset; + ppa64.m.pu = (ppa32 & lbaf->lun_mask) 
>> + lbaf->lun_offset; + ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> + lbaf->chk_offset; + ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> + lbaf->sec_offset; + } } return ppa64; @@ -1018,14 +1093,27 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64) ppa32 |= ppa64.c.line; ppa32 |= 1U << 31; } else { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf; - - ppa32 |= ppa64.g.ch << ppaf->ch_offset; - ppa32 |= ppa64.g.lun << ppaf->lun_offset; - ppa32 |= ppa64.g.blk << ppaf->blk_offset; - ppa32 |= ppa64.g.pg << ppaf->pg_offset; - ppa32 |= ppa64.g.pl << ppaf->pln_offset; - ppa32 |= ppa64.g.sec << ppaf->sec_offset; + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + + if (geo->version == NVM_OCSSD_SPEC_12) { + struct nvm_addrf_12 *ppaf = + (struct nvm_addrf_12 *)&pblk->addrf; + + ppa32 |= ppa64.g.ch << ppaf->ch_offset; + ppa32 |= ppa64.g.lun << ppaf->lun_offset; + ppa32 |= ppa64.g.blk << ppaf->blk_offset; + ppa32 |= ppa64.g.pg << ppaf->pg_offset; + ppa32 |= ppa64.g.pl << ppaf->pln_offset; + ppa32 |= ppa64.g.sec << ppaf->sec_offset; + } else { + struct nvm_addrf *lbaf = &pblk->addrf; + + ppa32 |= ppa64.m.grp << lbaf->ch_offset; + ppa32 |= ppa64.m.pu << lbaf->lun_offset; + ppa32 |= ppa64.m.chk << lbaf->chk_offset; + ppa32 |= ppa64.m.sec << lbaf->sec_offset; + } } return ppa32; @@ -1143,6 +1231,9 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type) struct nvm_geo *geo = &dev->geo; int flags; + if (geo->version == NVM_OCSSD_SPEC_20) + return 0; + flags = geo->pln_mode >> 1; if (type == PBLK_WRITE) @@ -1162,6 +1253,9 @@ static inline int pblk_set_read_mode(struct pblk *pblk, int type) struct nvm_geo *geo = &dev->geo; int flags; + if (geo->version == NVM_OCSSD_SPEC_20) + return 0; + flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE; if (type == PBLK_READ_SEQUENTIAL) flags |= geo->pln_mode >> 1; @@ -1175,16 +1269,21 @@ static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs) } #ifdef CONFIG_NVM_DEBUG -static inline void print_ppa(struct ppa_addr *p, char *msg, int error) +static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p, + char *msg, int error) { if (p->c.is_cached) { pr_err("ppa: (%s: %x) cache line: %llu\n", msg, error, (u64)p->c.line); - } else { + } else if (geo->version == NVM_OCSSD_SPEC_12) { pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n", msg, error, p->g.ch, p->g.lun, p->g.blk, p->g.pg, p->g.pl, p->g.sec); + } else { + pr_err("ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n", + msg, error, + p->m.grp, p->m.pu, p->m.chk, p->m.sec); } } @@ -1194,13 +1293,13 @@ static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd, int bit = -1; if (rqd->nr_ppas == 1) { - print_ppa(&rqd->ppa_addr, "rqd", error); + print_ppa(&pblk->dev->geo, &rqd->ppa_addr, "rqd", error); return; } while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas, bit + 1)) < rqd->nr_ppas) { - print_ppa(&rqd->ppa_list[bit], "rqd", error); + print_ppa(&pblk->dev->geo, &rqd->ppa_list[bit], "rqd", error); } pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status); @@ -1216,16 +1315,25 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev, for (i = 0; i < nr_ppas; i++) { ppa = &ppas[i]; - if (!ppa->c.is_cached && - ppa->g.ch < geo->num_ch && - ppa->g.lun < geo->num_lun && - ppa->g.pl < geo->num_pln && - ppa->g.blk < geo->num_chk && - ppa->g.pg < geo->num_pg && - ppa->g.sec < geo->ws_min) - continue; + if (geo->version == NVM_OCSSD_SPEC_12) { + if (!ppa->c.is_cached && + 
ppa->g.ch < geo->num_ch && + ppa->g.lun < geo->num_lun && + ppa->g.pl < geo->num_pln && + ppa->g.blk < geo->num_chk && + ppa->g.pg < geo->num_pg && + ppa->g.sec < geo->ws_min) + continue; + } else { + if (!ppa->c.is_cached && + ppa->m.grp < geo->num_ch && + ppa->m.pu < geo->num_lun && + ppa->m.chk < geo->num_chk && + ppa->m.sec < geo->clba) + continue; + } - print_ppa(ppa, "boundary", i); + print_ppa(geo, ppa, "boundary", i); return 1; } -- cgit v1.2.3 From 9156f360a77f9b975645a66af5ae8f7f68eccded Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Fri, 30 Mar 2018 00:05:22 +0200 Subject: lightnvm: pblk: don't recover unwritten lines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the line has not been written to, we should not try to recover any data from it, so check the state of the chunks in the line before attempting to read smeta. Signed-off-by: Hans Holmberg Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-recovery.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index 26356429dc72..3e079c2afa6e 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -864,6 +864,21 @@ static void pblk_recov_wa_counters(struct pblk *pblk, } } +static int pblk_line_was_written(struct pblk_line *line, + struct pblk_line_meta *lm) +{ + + int i; + int state_mask = NVM_CHK_ST_OFFLINE | NVM_CHK_ST_FREE; + + for (i = 0; i < lm->blk_per_line; i++) { + if (!(line->chks[i].state & state_mask)) + return 1; + } + + return 0; +} + struct pblk_line *pblk_recov_l2p(struct pblk *pblk) { struct pblk_line_meta *lm = &pblk->lm; @@ -900,6 +915,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta); + if (!pblk_line_was_written(line, lm)) + continue; + /* Lines that cannot be read are assumed as not written here */ if (pblk_line_read_smeta(pblk, line)) continue; -- cgit v1.2.3 From 5565b0ca50b5b82e0439f6e4dc7fbb122cd0b025 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 30 Mar 2018 00:05:23 +0200 Subject: lightnvm: pblk: remove some unnecessary NULL checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Smatch complains that flush_workqueue() dereferences the work queue pointer but then we check if it's NULL on the next line when it's too late. These NULL checks can be removed because the module won't load if we can't allocate the work queues. 
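The reasoning generalizes: if init fails hard when an allocation fails, the exit path only ever sees valid pointers, so NULL checks there are dead code. A runnable userspace sketch of the shape (the wq_* helpers are stand-ins for workqueue alloc/flush/destroy, invented for illustration):

#include <stdlib.h>
#include <stdio.h>

struct gc { void *reader_wq; };

/* stand-ins for workqueue alloc/flush/destroy */
static void *wq_alloc(void) { return malloc(1); }
static void wq_flush(void *wq) { (void)wq; }
static void wq_destroy(void *wq) { free(wq); }

static int gc_init(struct gc *gc)
{
	gc->reader_wq = wq_alloc();
	if (!gc->reader_wq)
		return -1;	/* init fails: the exit path never runs */
	return 0;
}

static void gc_exit(struct gc *gc)
{
	/* no NULL check needed: gc_exit only runs after gc_init succeeded */
	wq_flush(gc->reader_wq);
	wq_destroy(gc->reader_wq);
}

int main(void)
{
	struct gc gc;
	if (gc_init(&gc))
		return 1;
	gc_exit(&gc);
	return 0;
}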
Signed-off-by: Dan Carpenter Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/pblk-gc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c index 7143b0f740fb..6851a5c67189 100644 --- a/drivers/lightnvm/pblk-gc.c +++ b/drivers/lightnvm/pblk-gc.c @@ -664,12 +664,10 @@ void pblk_gc_exit(struct pblk *pblk) kthread_stop(gc->gc_reader_ts); flush_workqueue(gc->gc_reader_wq); - if (gc->gc_reader_wq) - destroy_workqueue(gc->gc_reader_wq); + destroy_workqueue(gc->gc_reader_wq); flush_workqueue(gc->gc_line_reader_wq); - if (gc->gc_line_reader_wq) - destroy_workqueue(gc->gc_line_reader_wq); + destroy_workqueue(gc->gc_line_reader_wq); if (gc->gc_writer_ts) kthread_stop(gc->gc_writer_ts); -- cgit v1.2.3 From b65125fa57973baeabc3a6fe2f8c1179bd45fd95 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Fri, 30 Mar 2018 00:05:24 +0200 Subject: lightnvm: remove function name in strings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For the sysfs functions, the function names are embedded into their error strings. If the function name later changes, the string may not be updated accordingly. Update the strings to use __func__ to avoid this. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/nvme/host/lightnvm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index ffd64a83c8c3..41279da799ed 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -1028,8 +1028,8 @@ static ssize_t nvm_dev_attr_show(struct device *dev, } else { return scnprintf(page, PAGE_SIZE, - "Unhandled attr(%s) in `nvm_dev_attr_show`\n", - attr->name); + "Unhandled attr(%s) in `%s`\n", + attr->name, __func__); } } @@ -1103,8 +1103,8 @@ static ssize_t nvm_dev_attr_show_12(struct device *dev, return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA); } else { return scnprintf(page, PAGE_SIZE, - "Unhandled attr(%s) in `nvm_dev_attr_show_12`\n", - attr->name); + "Unhandled attr(%s) in `%s`\n", + attr->name, __func__); } } @@ -1149,8 +1149,8 @@ static ssize_t nvm_dev_attr_show_20(struct device *dev, return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem); } else { return scnprintf(page, PAGE_SIZE, - "Unhandled attr(%s) in `nvm_dev_attr_show_20`\n", - attr->name); + "Unhandled attr(%s) in `%s`\n", + attr->name, __func__); } } -- cgit v1.2.3 From a5040c2d8dd732252609cde84ee3cdd6f1b1f927 Mon Sep 17 00:00:00 2001 From: Souvik Banerjee Date: Fri, 30 Mar 2018 14:32:42 -0500 Subject: blktrace: fix comment in blktrace_api.h The `__u64 time` field of the blk_io_trace struct refers to the time in nanoseconds, not in microseconds. It is set in __blk_add_trace, which does the following: t->time = ktime_to_ns(ktime_get()); ktime_to_ns returns ktime_t in nanoseconds, not microseconds. 
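A consumer of the binary trace stream should therefore divide the time field by 1e9, not 1e6. A minimal sketch of the conversion (the example value is invented; a real parser would read the full blk_io_trace record from the stream):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t time_ns = 1234567890ULL;	/* example trace timestamp */

	/* nanoseconds -> seconds, per the corrected comment */
	printf("%llu.%09llu s\n",
	       (unsigned long long)(time_ns / 1000000000ULL),
	       (unsigned long long)(time_ns % 1000000000ULL));
	return 0;
}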
Signed-off-by: Souvik Banerjee Signed-off-by: Jens Axboe --- include/uapi/linux/blktrace_api.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/blktrace_api.h b/include/uapi/linux/blktrace_api.h index 3c50e07ee833..690621b610e5 100644 --- a/include/uapi/linux/blktrace_api.h +++ b/include/uapi/linux/blktrace_api.h @@ -101,7 +101,7 @@ enum blktrace_notify { struct blk_io_trace { __u32 magic; /* MAGIC << 8 | version */ __u32 sequence; /* event number */ - __u64 time; /* in microseconds */ + __u64 time; /* in nanoseconds */ __u64 sector; /* disk offset */ __u32 bytes; /* transfer length */ __u32 action; /* what happened */ -- cgit v1.2.3 From bc6d65e6dc89c3b7ff78e4ad797117c122ffde8e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 2 Apr 2018 15:04:58 -0700 Subject: blk-mq: Directly schedule q->timeout_work when aborting a request Aborting a request is performed by overriding its deadline to now and scheduling timeout handling immediately. For the latter part, the code was using mod_timer(timeout, 0), which can't guarantee that the timer runs afterwards. Let's schedule the underlying work item directly instead. This fixes the hangs during probing reported by Sitsofe, but it isn't yet clear to me how the failure can happen reliably if it's just the race condition described above. Signed-off-by: Tejun Heo Reported-by: Sitsofe Wheeler Reported-by: Meelis Roos Fixes: 358f70da49d7 ("blk-mq: make blk_abort_request() trigger timeout path") Cc: stable@vger.kernel.org # v4.16 Link: http://lkml.kernel.org/r/CALjAwxh-PVYFnYFCJpGOja+m5SzZ8Sa4J7ohxdK=r8NyOF-EMA@mail.gmail.com Link: http://lkml.kernel.org/r/alpine.LRH.2.21.1802261049140.4893@math.ut.ee Signed-off-by: Jens Axboe --- block/blk-timeout.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 34a55250f08a..652d4d4d3e97 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -163,7 +163,7 @@ void blk_abort_request(struct request *req) * No need for fancy synchronizations. */ blk_rq_set_deadline(req, jiffies); - mod_timer(&req->q->timeout, 0); + kblockd_schedule_work(&req->q->timeout_work); } else { if (blk_mark_rq_complete(req)) return; -- cgit v1.2.3
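The shape of the fix, reduced to its essence: when work must run now, hand it straight to the executor rather than arming a timer with a zero expiry and relying on the timer core to fire it. The sketch below collapses the work-queue indirection into a direct call purely for illustration; the names are invented:

#include <stdio.h>

struct queue { void (*timeout_work)(struct queue *); };

static void timeout_handler(struct queue *q)
{
	(void)q;
	printf("timeout handling ran\n");
}

static void abort_request(struct queue *q)
{
	/* analogue of kblockd_schedule_work(&q->timeout_work): the handler
	 * is guaranteed to run, with no dependency on timer rearming rules */
	q->timeout_work(q);
}

int main(void)
{
	struct queue q = { .timeout_work = timeout_handler };
	abort_request(&q);
	return 0;
}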