From 3cd485b1f8e25a6534eb4c542e7eba1b944fbaaf Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Tue, 12 Jan 2016 07:49:15 +0100 Subject: lightnvm: fix bio submission issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Put bio when submission fails, since we get it before submission. And return error when backend device driver doesn't provide a submit_io method, thus we can end IO properly. Signed-off-by: Wenwei Tao Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 134e4faba482..a1e7488c1f3e 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -650,11 +650,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error) if (bio_data_dir(rqd->bio) == WRITE) rrpc_end_io_write(rrpc, rrqd, laddr, npages); + bio_put(rqd->bio); + if (rrqd->flags & NVM_IOTYPE_GC) return 0; rrpc_unlock_rq(rrpc, rqd); - bio_put(rqd->bio); if (npages > 1) nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list); @@ -841,6 +842,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio, err = nvm_submit_io(rrpc->dev, rqd); if (err) { pr_err("rrpc: I/O submission failed: %d\n", err); + bio_put(bio); return NVM_IO_ERR; } -- cgit v1.2.3 From 3bfbc6adbc5031e8a5907baa5beb27b41637742a Mon Sep 17 00:00:00 2001 From: Javier Gonzalez Date: Tue, 12 Jan 2016 07:49:17 +0100 Subject: lightnvm: add check after mempool allocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The mempool allocation might fail. Make sure to return error when it does, instead of causing a kernel panic. Signed-off-by: Javier Gonzalez Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index a1e7488c1f3e..f4bc98687d7f 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -287,6 +287,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk) } page = mempool_alloc(rrpc->page_pool, GFP_NOIO); + if (!page) + return -ENOMEM; while ((slot = find_first_zero_bit(rblk->invalid_pages, nr_pgs_per_blk)) < nr_pgs_per_blk) { -- cgit v1.2.3 From c27278bddd75a3ee755c8e83c6bcc3fdd7271ef6 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Tue, 12 Jan 2016 07:49:18 +0100 Subject: lightnvm: unlock rq and free ppa_list on submission fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When rrpc_write_ppalist_rq and rrpc_read_ppalist_rq succeed, we setup rq correctly, but nvm_submit_io may afterward fail since it cannot allocate request or nvme_nvm_command, we return error but forget to cleanup the previous work. 
Signed-off-by: Wenwei Tao Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index f4bc98687d7f..748cab499580 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -845,6 +845,12 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio, if (err) { pr_err("rrpc: I/O submission failed: %d\n", err); bio_put(bio); + if (!(flags & NVM_IOTYPE_GC)) { + rrpc_unlock_rq(rrpc, rqd); + if (rqd->nr_pages > 1) + nvm_dev_dma_free(rrpc->dev, + rqd->ppa_list, rqd->dma_ppa_list); + } return NVM_IO_ERR; } -- cgit v1.2.3 From 91276162de9476b8ff32d9452e849210e5dd09e9 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:21 +0100 Subject: lightnvm: refactor end_io functions for sync MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To implement sync I/O support within the LightNVM core, the end_io functions are refactored to take an end_io function pointer instead of testing for initialized media manager, followed by calling its end_io function. Sync I/O can then be implemented using a callback that signal I/O completion. This is similar to the logic found in blk_to_execute_io(). By implementing it this way, the underlying device I/Os submission logic is abstracted away from core, targets, and media managers. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 3 +-- drivers/lightnvm/core.c | 16 ++++++++++++++++ drivers/lightnvm/gennvm.c | 34 ++++++++++++++-------------------- drivers/lightnvm/rrpc.c | 6 ++---- drivers/nvme/host/lightnvm.c | 5 +---- include/linux/lightnvm.h | 12 ++++++++---- 6 files changed, 42 insertions(+), 34 deletions(-) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 09e3c0d87ecc..e6cad40a248f 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -436,9 +436,8 @@ static void null_del_dev(struct nullb *nullb) static void null_lnvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; - struct nvm_dev *dev = rqd->dev; - dev->mt->end_io(rqd, error); + nvm_end_io(rqd, error); blk_put_request(rq); } diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 081b0f59b773..fa1a052c4737 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include static LIST_HEAD(nvm_targets); @@ -288,6 +289,21 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa) } EXPORT_SYMBOL(nvm_erase_ppa); +void nvm_end_io(struct nvm_rq *rqd, int error) +{ + rqd->end_io(rqd, error); +} +EXPORT_SYMBOL(nvm_end_io); + +static void nvm_end_io_sync(struct nvm_rq *rqd, int errors) +{ + struct completion *waiting = rqd->wait; + + rqd->wait = NULL; + + complete(waiting); +} + static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 373be72816bd..12ddcaa343e9 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -317,18 +317,6 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) spin_unlock(&vlun->lock); } -static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) -{ - if (!dev->ops->submit_io) - return -ENODEV; - - /* Convert address space */ - nvm_generic_to_addr_mode(dev, rqd); - - rqd->dev = dev; - 
return dev->ops->submit_io(dev, rqd); -} - static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, int type) { @@ -375,25 +363,32 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); } -static int gennvm_end_io(struct nvm_rq *rqd, int error) +static void gennvm_end_io(struct nvm_rq *rqd, int error) { struct nvm_tgt_instance *ins = rqd->ins; - int ret = 0; switch (error) { case NVM_RSP_SUCCESS: - break; case NVM_RSP_ERR_EMPTYPAGE: break; case NVM_RSP_ERR_FAILWRITE: gennvm_mark_blk_bad(rqd->dev, rqd); - default: - ret++; } - ret += ins->tt->end_io(rqd, error); + ins->tt->end_io(rqd, error); +} - return ret; +static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) +{ + if (!dev->ops->submit_io) + return -ENODEV; + + /* Convert address space */ + nvm_generic_to_addr_mode(dev, rqd); + + rqd->dev = dev; + rqd->end_io = gennvm_end_io; + return dev->ops->submit_io(dev, rqd); } static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, @@ -442,7 +437,6 @@ static struct nvmm_type gennvm = { .put_blk = gennvm_put_blk, .submit_io = gennvm_submit_io, - .end_io = gennvm_end_io, .erase_blk = gennvm_erase_blk, .get_lun = gennvm_get_lun, diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 748cab499580..661c6f370f5a 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -642,7 +642,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, } } -static int rrpc_end_io(struct nvm_rq *rqd, int error) +static void rrpc_end_io(struct nvm_rq *rqd, int error) { struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); @@ -655,7 +655,7 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error) bio_put(rqd->bio); if (rrqd->flags & NVM_IOTYPE_GC) - return 0; + return; rrpc_unlock_rq(rrpc, rqd); @@ -665,8 +665,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error) nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata); mempool_free(rqd, rrpc->rq_pool); - - return 0; } static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio, diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 15f2acb4d5cd..1d1830e2ee10 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -453,11 +453,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd, static void nvme_nvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; - struct nvm_dev *dev = rqd->dev; - if (dev->mt && dev->mt->end_io(rqd, error)) - pr_err("nvme: err status: %x result: %lx\n", - rq->errors, (unsigned long)rq->special); + nvm_end_io(rqd, error); kfree(rq->cmd); blk_mq_free_request(rq); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 2fd6871ac7f5..9c9fe9ca0441 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -148,6 +148,9 @@ struct ppa_addr { }; }; +struct nvm_rq; +typedef void (nvm_end_io_fn)(struct nvm_rq *, int); + struct nvm_rq { struct nvm_tgt_instance *ins; struct nvm_dev *dev; @@ -164,6 +167,9 @@ struct nvm_rq { void *metadata; dma_addr_t dma_metadata; + struct completion *wait; + nvm_end_io_fn *end_io; + uint8_t opcode; uint16_t nr_pages; uint16_t flags; @@ -347,7 +353,6 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); -typedef int 
(nvm_tgt_end_io_fn)(struct nvm_rq *, int); typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); typedef void (nvm_tgt_exit_fn)(void *); @@ -358,7 +363,7 @@ struct nvm_tgt_type { /* target entry points */ nvm_tgt_make_rq_fn *make_rq; nvm_tgt_capacity_fn *capacity; - nvm_tgt_end_io_fn *end_io; + nvm_end_io_fn *end_io; /* module-specific init/teardown */ nvm_tgt_init_fn *init; @@ -383,7 +388,6 @@ typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); -typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, unsigned long); typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); @@ -404,7 +408,6 @@ struct nvmm_type { nvmm_flush_blk_fn *flush_blk; nvmm_submit_io_fn *submit_io; - nvmm_end_io_fn *end_io; nvmm_erase_blk_fn *erase_blk; /* Configuration management */ @@ -434,6 +437,7 @@ extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); +extern void nvm_end_io(struct nvm_rq *, int); #else /* CONFIG_NVM */ struct nvm_dev_ops; -- cgit v1.2.3 From 2b11c1b24e50a26d435f1d59955f1268053623b7 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Tue, 12 Jan 2016 07:49:23 +0100 Subject: lightnvm: check bi_error in gc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We should check the last I/O's completion status before starting another one. Signed-off-by: Wenwei Tao Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 661c6f370f5a..5df4a696184a 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -330,6 +330,10 @@ try: goto finished; } wait_for_completion_io(&wait); + if (bio->bi_error) { + rrpc_inflight_laddr_release(rrpc, rqd); + goto finished; + } bio_reset(bio); reinit_completion(&wait); @@ -352,6 +356,8 @@ try: wait_for_completion_io(&wait); rrpc_inflight_laddr_release(rrpc, rqd); + if (bio->bi_error) + goto finished; bio_reset(bio); } -- cgit v1.2.3 From d0ca798f960ad7d86f5186fe312c131d00563eb7 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Tue, 12 Jan 2016 07:49:24 +0100 Subject: lightnvm: put block back to gc list on its reclaim fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We delete a block from the gc list before reclaiming it, so put it back on the list if its reclaim fails; otherwise the block will never be reclaimed and made programmable again. 
Signed-off-by: Wenwei Tao Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 5df4a696184a..bee2352fcce1 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -381,16 +381,26 @@ static void rrpc_block_gc(struct work_struct *work) struct rrpc *rrpc = gcb->rrpc; struct rrpc_block *rblk = gcb->rblk; struct nvm_dev *dev = rrpc->dev; + struct nvm_lun *lun = rblk->parent->lun; + struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset]; + mempool_free(gcb, rrpc->gcb_pool); pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id); if (rrpc_move_valid_pages(rrpc, rblk)) - goto put_back; + + if (nvm_erase_blk(dev, rblk->parent)) + goto put_back; - nvm_erase_blk(dev, rblk->parent); rrpc_put_blk(rrpc, rblk); -done: - mempool_free(gcb, rrpc->gcb_pool); + + return; + +put_back: + spin_lock(&rlun->lock); + list_add_tail(&rblk->prio, &rlun->prio_list); + spin_unlock(&rlun->lock); } /* the block with highest number of invalid pages, will be in the beginning -- cgit v1.2.3 From b262924be03d5d2ae735bc9a4b37eb2c613f61f8 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Tue, 12 Jan 2016 07:49:25 +0100 Subject: lightnvm: fix locking and mempool in rrpc_lun_gc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes two issues in rrpc_lun_gc: 1. prio_list is protected by the rrpc_lun's lock, not the nvm_lun's, so acquire the rlun's lock instead of the lun's before operating on the list. 2. we delete a block from prio_list before allocating the gcb, but the gcb allocation may fail and we end up without putting the block back on the list, so it will never get reclaimed. To solve this, delete the block from the list only after the gcb allocation succeeds. 
Signed-off-by: Wenwei Tao Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index bee2352fcce1..745acd9db523 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -445,7 +445,7 @@ static void rrpc_lun_gc(struct work_struct *work) if (nr_blocks_need < rrpc->nr_luns) nr_blocks_need = rrpc->nr_luns; - spin_lock(&lun->lock); + spin_lock(&rlun->lock); while (nr_blocks_need > lun->nr_free_blocks && !list_empty(&rlun->prio_list)) { struct rrpc_block *rblock = block_prio_find_max(rlun); @@ -454,16 +454,16 @@ static void rrpc_lun_gc(struct work_struct *work) if (!rblock->nr_invalid_pages) break; + gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC); + if (!gcb) + break; + list_del_init(&rblock->prio); BUG_ON(!block_is_full(rrpc, rblock)); pr_debug("rrpc: selected block '%lu' for GC\n", block->id); - gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC); - if (!gcb) - break; - gcb->rrpc = rrpc; gcb->rblk = rblock; INIT_WORK(&gcb->ws_gc, rrpc_block_gc); @@ -472,7 +472,7 @@ static void rrpc_lun_gc(struct work_struct *work) nr_blocks_need--; } - spin_unlock(&lun->lock); + spin_unlock(&rlun->lock); /* TODO: Hint that request queue can be started again */ } -- cgit v1.2.3 From 4b79beb4c36d697e940e9f70d72399c71230a418 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Tue, 12 Jan 2016 07:49:27 +0100 Subject: lightnvm: move the pages per block check out of the loop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no need to check whether the dev's pages per block exceed what rrpc supports every time we init a lun; we only need to check once, before entering the lun init loop. Signed-off-by: Wenwei Tao Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 745acd9db523..ec7aacf78f2f 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -1114,6 +1114,11 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) struct rrpc_lun *rlun; int i, j; + if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) { + pr_err("rrpc: number of pages per block too high."); + return -EINVAL; + } + spin_lock_init(&rrpc->rev_lock); rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun), @@ -1125,12 +1130,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) for (i = 0; i < rrpc->nr_luns; i++) { struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i); - if (dev->pgs_per_blk > - MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) { - pr_err("rrpc: number of pages per block too high."); - goto err; - } - rlun = &rrpc->luns[i]; rlun->rrpc = rrpc; rlun->parent = lun; -- cgit v1.2.3 From 72d256ecc5d0c8cbcc0bd5c6d983b434df556cb4 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:29 +0100 Subject: lightnvm: move rq->error to nvm_rq->error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of passing the request error into the LightNVM modules, incorporate it into the nvm_rq. 
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 3 ++- drivers/lightnvm/gennvm.c | 6 +++--- drivers/lightnvm/rrpc.c | 2 +- include/linux/lightnvm.h | 4 +++- 4 files changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index cd674af3d17d..dad84ddbefb4 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -291,7 +291,8 @@ EXPORT_SYMBOL(nvm_erase_ppa); void nvm_end_io(struct nvm_rq *rqd, int error) { - rqd->end_io(rqd, error); + rqd->error = error; + rqd->end_io(rqd); } EXPORT_SYMBOL(nvm_end_io); diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 262da6dd9056..4c15846b327f 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -363,11 +363,11 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); } -static void gennvm_end_io(struct nvm_rq *rqd, int error) +static void gennvm_end_io(struct nvm_rq *rqd) { struct nvm_tgt_instance *ins = rqd->ins; - switch (error) { + switch (rqd->error) { case NVM_RSP_SUCCESS: case NVM_RSP_ERR_EMPTYPAGE: break; @@ -375,7 +375,7 @@ static void gennvm_end_io(struct nvm_rq *rqd, int error) gennvm_mark_blk_bad(rqd->dev, rqd); } - ins->tt->end_io(rqd, error); + ins->tt->end_io(rqd); } static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index ec7aacf78f2f..9a5d94007ec0 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -658,7 +658,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, } } -static void rrpc_end_io(struct nvm_rq *rqd, int error) +static void rrpc_end_io(struct nvm_rq *rqd) { struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index a83298f62122..9acc71a9a47f 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -149,7 +149,7 @@ struct ppa_addr { }; struct nvm_rq; -typedef void (nvm_end_io_fn)(struct nvm_rq *, int); +typedef void (nvm_end_io_fn)(struct nvm_rq *); struct nvm_rq { struct nvm_tgt_instance *ins; @@ -173,6 +173,8 @@ struct nvm_rq { uint8_t opcode; uint16_t nr_pages; uint16_t flags; + + int error; }; static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) -- cgit v1.2.3 From d7a64d275b39e19c010cdfd8728cc64f14b59bda Mon Sep 17 00:00:00 2001 From: Javier González Date: Tue, 12 Jan 2016 07:49:31 +0100 Subject: lightnvm: reference rrpc lun in rrpc block MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, a rrpc block only points to its nvm_lun. If a user wants to find the associated rrpc lun, it will have to calculate the index and look it up manually. By referencing the rrpc lun directly, this step can be omitted, at the cost of a larger memory footprint. This is important for upcoming patches that implement write buffering in rrpc. 
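As a rough sketch (not part of the patch, with made-up helper names), this is what the back-pointer saves; the index arithmetic mirrors what rrpc_block_gc() does above:

static struct rrpc_lun *rrpc_blk_to_rlun_by_index(struct rrpc *rrpc,
						  struct rrpc_block *rblk)
{
	/* without the back-pointer: re-derive the lun index */
	struct nvm_lun *lun = rblk->parent->lun;

	return &rrpc->luns[lun->id - rrpc->lun_offset];
}

static struct rrpc_lun *rrpc_blk_to_rlun_direct(struct rrpc_block *rblk)
{
	/* with rblk->rlun: a plain dereference */
	return rblk->rlun;
}
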
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/rrpc.c | 1 + drivers/lightnvm/rrpc.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 9a5d94007ec0..280350c24cec 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -1150,6 +1150,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) struct nvm_block *blk = &lun->blocks[j]; rblk->parent = blk; + rblk->rlun = rlun; INIT_LIST_HEAD(&rblk->prio); spin_lock_init(&rblk->lock); } diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index a9696a06c38c..7c5fa4dd9722 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h @@ -54,6 +54,7 @@ struct rrpc_rq { struct rrpc_block { struct nvm_block *parent; + struct rrpc_lun *rlun; struct list_head prio; #define MAX_INVALID_PAGES_STORAGE 8 -- cgit v1.2.3 From ff0e498bfa185fad5e86c4c7a2db4f9648d2344f Mon Sep 17 00:00:00 2001 From: Javier González Date: Tue, 12 Jan 2016 07:49:33 +0100 Subject: lightnvm: manage open and closed blocks separately MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LightNVM targets need to know the state of the flash block when doing flash optimizations. An example is implementing a write buffer to respect the flash page size. Currently, block state is not accounted for; the media manager only differentiates among free, bad and in-use blocks. This patch adds the logic in the generic media manager to let targets manage open and closed blocks separately, and implements such management in rrpc. It also adds a set of flags to describe the state of the block (open, closed, free, bad). In order to avoid taking two locks (nvm_lun and rrpc_lun) consecutively, we introduce lockless get_/put_block primitives so that the open/closed list locking and future common logic are handled within the nvm_lun lock. 
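As a rough sketch of the pattern this enables (the real implementation is rrpc_get_blk() in the diff below; the helper name here is made up), a target now takes the nvm_lun lock once and, under it, both grabs a block from the media manager and files it on its own open list:

static struct rrpc_block *example_get_open_blk(struct rrpc *rrpc,
					       struct rrpc_lun *rlun,
					       unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, lun, flags);
	if (!blk) {
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = &rlun->blocks[blk->id];
	/* block state and open-list membership change under one lock */
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	return rblk;
}
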
Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 14 +++++++ drivers/lightnvm/gennvm.c | 99 +++++++++++++++++++++++++++++------------------ drivers/lightnvm/rrpc.c | 38 +++++++++++++++--- drivers/lightnvm/rrpc.h | 12 +++++- include/linux/lightnvm.h | 25 ++++++++++-- 5 files changed, 142 insertions(+), 46 deletions(-) (limited to 'drivers/lightnvm/rrpc.c') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index dc83e010d084..e5e396338319 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -167,6 +167,20 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name) return NULL; } +struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun, + unsigned long flags) +{ + return dev->mt->get_blk_unlocked(dev, lun, flags); +} +EXPORT_SYMBOL(nvm_get_blk_unlocked); + +/* Assumes that all valid pages have already been moved on release to bm */ +void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk) +{ + return dev->mt->put_blk_unlocked(dev, blk); +} +EXPORT_SYMBOL(nvm_put_blk_unlocked); + struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun, unsigned long flags) { diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 4c15846b327f..7fb725b16148 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) lun->vlun.lun_id = i % dev->luns_per_chnl; lun->vlun.chnl_id = i / dev->luns_per_chnl; lun->vlun.nr_free_blocks = dev->blks_per_lun; - lun->vlun.nr_inuse_blocks = 0; + lun->vlun.nr_open_blocks = 0; + lun->vlun.nr_closed_blocks = 0; lun->vlun.nr_bad_blocks = 0; } return 0; @@ -134,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) pba = pba - (dev->sec_per_lun * lun_id); blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)]; - if (!blk->type) { + if (!blk->state) { /* at this point, we don't know anything about the * block. It's up to the FTL on top to re-etablish the - * block state + * block state. The block is assumed to be open. 
*/ list_move_tail(&blk->list, &lun->used_list); - blk->type = 1; + blk->state = NVM_BLK_ST_OPEN; lun->vlun.nr_free_blocks--; - lun->vlun.nr_inuse_blocks++; + lun->vlun.nr_open_blocks++; } } @@ -256,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev) module_put(THIS_MODULE); } -static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, +static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *vlun, unsigned long flags) { struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); struct nvm_block *blk = NULL; int is_gc = flags & NVM_IOTYPE_GC; - spin_lock(&vlun->lock); + assert_spin_locked(&vlun->lock); if (list_empty(&lun->free_list)) { pr_err_ratelimited("gennvm: lun %u have no free pages available", @@ -276,44 +277,63 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, blk = list_first_entry(&lun->free_list, struct nvm_block, list); list_move_tail(&blk->list, &lun->used_list); - blk->type = 1; + blk->state = NVM_BLK_ST_OPEN; lun->vlun.nr_free_blocks--; - lun->vlun.nr_inuse_blocks++; + lun->vlun.nr_open_blocks++; out: + return blk; +} + +static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, + struct nvm_lun *vlun, unsigned long flags) +{ + struct nvm_block *blk; + + spin_lock(&vlun->lock); + blk = gennvm_get_blk_unlocked(dev, vlun, flags); spin_unlock(&vlun->lock); return blk; } -static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) +static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk) { struct nvm_lun *vlun = blk->lun; struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); - spin_lock(&vlun->lock); + assert_spin_locked(&vlun->lock); - switch (blk->type) { - case 1: + if (blk->state & NVM_BLK_ST_OPEN) { list_move_tail(&blk->list, &lun->free_list); + lun->vlun.nr_open_blocks--; lun->vlun.nr_free_blocks++; - lun->vlun.nr_inuse_blocks--; - blk->type = 0; - break; - case 2: + blk->state = NVM_BLK_ST_FREE; + } else if (blk->state & NVM_BLK_ST_CLOSED) { + list_move_tail(&blk->list, &lun->free_list); + lun->vlun.nr_closed_blocks--; + lun->vlun.nr_free_blocks++; + blk->state = NVM_BLK_ST_FREE; + } else if (blk->state & NVM_BLK_ST_BAD) { list_move_tail(&blk->list, &lun->bb_list); lun->vlun.nr_bad_blocks++; - lun->vlun.nr_inuse_blocks--; - break; - default: + blk->state = NVM_BLK_ST_BAD; + } else { WARN_ON_ONCE(1); pr_err("gennvm: erroneous block type (%lu -> %u)\n", - blk->id, blk->type); + blk->id, blk->state); list_move_tail(&blk->list, &lun->bb_list); lun->vlun.nr_bad_blocks++; - lun->vlun.nr_inuse_blocks--; + blk->state = NVM_BLK_ST_BAD; } +} +static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) +{ + struct nvm_lun *vlun = blk->lun; + + spin_lock(&vlun->lock); + gennvm_put_blk_unlocked(dev, blk); spin_unlock(&vlun->lock); } @@ -339,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, blk = &lun->vlun.blocks[ppa->g.blk]; /* will be moved to bb list on put_blk from target */ - blk->type = type; + blk->state = type; } /* mark block bad. It is expected the target recover from the error. 
*/ @@ -358,9 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) /* look up blocks and mark them as bad */ if (rqd->nr_pages > 1) for (i = 0; i < rqd->nr_pages; i++) - gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2); + gennvm_blk_set_type(dev, &rqd->ppa_list[i], + NVM_BLK_ST_BAD); else - gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); + gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD); } static void gennvm_end_io(struct nvm_rq *rqd) @@ -416,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev) gennvm_for_each_lun(gn, lun, i) { spin_lock(&lun->vlun.lock); - pr_info("%s: lun%8u\t%u\t%u\t%u\n", + pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n", dev->name, i, lun->vlun.nr_free_blocks, - lun->vlun.nr_inuse_blocks, + lun->vlun.nr_open_blocks, + lun->vlun.nr_closed_blocks, lun->vlun.nr_bad_blocks); spin_unlock(&lun->vlun.lock); @@ -427,20 +449,23 @@ static void gennvm_lun_info_print(struct nvm_dev *dev) } static struct nvmm_type gennvm = { - .name = "gennvm", - .version = {0, 1, 0}, + .name = "gennvm", + .version = {0, 1, 0}, + + .register_mgr = gennvm_register, + .unregister_mgr = gennvm_unregister, - .register_mgr = gennvm_register, - .unregister_mgr = gennvm_unregister, + .get_blk_unlocked = gennvm_get_blk_unlocked, + .put_blk_unlocked = gennvm_put_blk_unlocked, - .get_blk = gennvm_get_blk, - .put_blk = gennvm_put_blk, + .get_blk = gennvm_get_blk, + .put_blk = gennvm_put_blk, - .submit_io = gennvm_submit_io, - .erase_blk = gennvm_erase_blk, + .submit_io = gennvm_submit_io, + .erase_blk = gennvm_erase_blk, - .get_lun = gennvm_get_lun, - .lun_info_print = gennvm_lun_info_print, + .get_lun = gennvm_get_lun, + .lun_info_print = gennvm_lun_info_print, }; static int __init gennvm_module_init(void) diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 280350c24cec..d8c75958ced3 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -179,16 +179,23 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk) static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, unsigned long flags) { + struct nvm_lun *lun = rlun->parent; struct nvm_block *blk; struct rrpc_block *rblk; - blk = nvm_get_blk(rrpc->dev, rlun->parent, flags); - if (!blk) + spin_lock(&lun->lock); + blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags); + if (!blk) { + pr_err("nvm: rrpc: cannot get new block from media manager\n"); + spin_unlock(&lun->lock); return NULL; + } rblk = &rlun->blocks[blk->id]; - blk->priv = rblk; + list_add_tail(&rblk->list, &rlun->open_list); + spin_unlock(&lun->lock); + blk->priv = rblk; bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk); rblk->next_page = 0; rblk->nr_invalid_pages = 0; @@ -199,7 +206,13 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk) { - nvm_put_blk(rrpc->dev, rblk->parent); + struct rrpc_lun *rlun = rblk->rlun; + struct nvm_lun *lun = rlun->parent; + + spin_lock(&lun->lock); + nvm_put_blk_unlocked(rrpc->dev, rblk->parent); + list_del(&rblk->list); + spin_unlock(&lun->lock); } static void rrpc_put_blks(struct rrpc *rrpc) @@ -653,8 +666,20 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, lun = rblk->parent->lun; cmnt_size = atomic_inc_return(&rblk->data_cmnt_size); - if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) + if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) { + struct nvm_block *blk = rblk->parent; + struct rrpc_lun *rlun = 
rblk->rlun; + + spin_lock(&lun->lock); + lun->nr_open_blocks--; + lun->nr_closed_blocks++; + blk->state &= ~NVM_BLK_ST_OPEN; + blk->state |= NVM_BLK_ST_CLOSED; + list_move_tail(&rblk->list, &rlun->closed_list); + spin_unlock(&lun->lock); + rrpc_run_gc(rrpc, rblk); + } } } @@ -1134,6 +1159,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) rlun->rrpc = rrpc; rlun->parent = lun; INIT_LIST_HEAD(&rlun->prio_list); + INIT_LIST_HEAD(&rlun->open_list); + INIT_LIST_HEAD(&rlun->closed_list); + INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); spin_lock_init(&rlun->lock); diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index 7c5fa4dd9722..ef13ac7700c8 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h @@ -56,6 +56,7 @@ struct rrpc_block { struct nvm_block *parent; struct rrpc_lun *rlun; struct list_head prio; + struct list_head list; #define MAX_INVALID_PAGES_STORAGE 8 /* Bitmap for invalid page intries */ @@ -74,7 +75,16 @@ struct rrpc_lun { struct nvm_lun *parent; struct rrpc_block *cur, *gc_cur; struct rrpc_block *blocks; /* Reference to block allocation */ - struct list_head prio_list; /* Blocks that may be GC'ed */ + + struct list_head prio_list; /* Blocks that may be GC'ed */ + struct list_head open_list; /* In-use open blocks. These are blocks + * that can be both written to and read + * from + */ + struct list_head closed_list; /* In-use closed blocks. These are + * blocks that can _only_ be read from + */ + struct work_struct ws_gc; spinlock_t lock; diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 4a700a137460..aa35907714b8 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -229,12 +229,25 @@ struct nvm_lun { int lun_id; int chnl_id; - unsigned int nr_inuse_blocks; /* Number of used blocks */ + /* It is up to the target to mark blocks as closed. If the target does + * not do it, all blocks are marked as open, and nr_open_blocks + * represents the number of blocks in use + */ + unsigned int nr_open_blocks; /* Number of used, writable blocks */ + unsigned int nr_closed_blocks; /* Number of used, read-only blocks */ unsigned int nr_free_blocks; /* Number of unused blocks */ unsigned int nr_bad_blocks; /* Number of bad blocks */ - struct nvm_block *blocks; spinlock_t lock; + + struct nvm_block *blocks; +}; + +enum { + NVM_BLK_ST_FREE = 0x1, /* Free block */ + NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */ + NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */ + NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; struct nvm_block { @@ -243,7 +256,7 @@ struct nvm_block { unsigned long id; void *priv; - int type; + int state; }; struct nvm_dev { @@ -404,6 +417,8 @@ struct nvmm_type { nvmm_unregister_fn *unregister_mgr; /* Block administration callbacks */ + nvmm_get_blk_fn *get_blk_unlocked; + nvmm_put_blk_fn *put_blk_unlocked; nvmm_get_blk_fn *get_blk; nvmm_put_blk_fn *put_blk; nvmm_open_blk_fn *open_blk; @@ -424,6 +439,10 @@ struct nvmm_type { extern int nvm_register_mgr(struct nvmm_type *); extern void nvm_unregister_mgr(struct nvmm_type *); +extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *, + struct nvm_lun *, unsigned long); +extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *); + extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, unsigned long); extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); -- cgit v1.2.3
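The sync refactor above (91276162, "lightnvm: refactor end_io functions for sync") introduces rqd->wait, rqd->end_io and a static nvm_end_io_sync() helper in core.c but adds no user for them yet. A rough sketch of how a synchronous submission inside core could be built on those hooks (the function name is made up, and it assumes the one-argument end_io signature and nvm_rq->error field from 72d256ec):

/* Sketch only -- not part of the series. Assumes ppa addresses are already
 * in device format; a real caller would convert them first.
 */
static int nvm_submit_io_sync_example(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret;

	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;	/* completes 'wait' via nvm_end_io() */

	ret = dev->ops->submit_io(dev, rqd);
	if (ret)
		return ret;

	/* the driver's completion path calls nvm_end_io(), which invokes
	 * rqd->end_io() and wakes us up
	 */
	wait_for_completion_io(&wait);

	return rqd->error;
}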