From 069368e91879a3a640cfae4bdc1f9f8cc99c93a0 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:19 +0100 Subject: lightnvm: move ppa erase logic to core MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device may function in single, dual or quad plane mode. The gennvm media manager manages this with explicit helpers. They convert a single ppa to 1, 2 or 4 separate ppas in a ppa list. To aid implementation of recovery and system blocks, this functionality can be moved directly into the core. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- include/linux/lightnvm.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 034117b3be5f..c228dbc803bf 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -427,6 +427,9 @@ extern int nvm_register(struct request_queue *, char *, extern void nvm_unregister(char *); extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); +extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); +extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); +extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); #else /* CONFIG_NVM */ struct nvm_dev_ops; -- cgit v1.2.3 From abd805ec9f51f37db9da63dda44c3f4b4ae8ad57 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:20 +0100 Subject: lightnvm: refactor rqd ppa list into set/free MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A device may be driven in single, double or quad plane mode. In that case, the rqd must have either one, two, or four PPAs set for a single PPA sent to the device. Refactor this logic into separate helper functions that can be shared by the program/erase/read paths in the core.
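For a single PPA on a device driven in dual plane mode, the new nvm_set_rqd_ppalist() helper expands the request to one PPA per plane, roughly as in this sketch of the logic added below:

	plane_cnt = (1 << dev->plane_mode);	/* 2 on a dual plane device */
	rqd->nr_pages = plane_cnt * nr_ppas;	/* two entries for one PPA */

	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
		ppa.g.pl = pl_idx;
		rqd->ppa_list[pl_idx] = ppa;
	}

nvm_free_rqd_ppalist() then releases the DMA-allocated list once the request has completed.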
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 71 ++++++++++++++++++++++++++++++++++-------------- include/linux/lightnvm.h | 3 ++ 2 files changed, 53 insertions(+), 21 deletions(-) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 6134339aa6cf..081b0f59b773 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -220,40 +220,69 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) } EXPORT_SYMBOL(nvm_generic_to_addr_mode); -int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa) +int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd, + struct ppa_addr *ppas, int nr_ppas) { - int plane_cnt = 0, pl_idx, ret; - struct nvm_rq rqd; + int i, plane_cnt, pl_idx; + + if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) { + rqd->nr_pages = 1; + rqd->ppa_addr = ppas[0]; - if (!dev->ops->erase_block) return 0; + } - if (dev->plane_mode == NVM_PLANE_SINGLE) { - rqd.nr_pages = 1; - rqd.ppa_addr = ppa; - } else { - plane_cnt = (1 << dev->plane_mode); - rqd.nr_pages = plane_cnt; - - rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, - &rqd.dma_ppa_list); - if (!rqd.ppa_list) { - pr_err("nvm: failed to allocate dma memory\n"); - return -ENOMEM; - } + plane_cnt = (1 << dev->plane_mode); + rqd->nr_pages = plane_cnt * nr_ppas; + + if (dev->ops->max_phys_sect < rqd->nr_pages) + return -EINVAL; + + rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list); + if (!rqd->ppa_list) { + pr_err("nvm: failed to allocate dma memory\n"); + return -ENOMEM; + } + for (i = 0; i < nr_ppas; i++) { for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) { - ppa.g.pl = pl_idx; - rqd.ppa_list[pl_idx] = ppa; + ppas[i].g.pl = pl_idx; + rqd->ppa_list[(i * plane_cnt) + pl_idx] = ppas[i]; } } + return 0; +} +EXPORT_SYMBOL(nvm_set_rqd_ppalist); + +void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd) +{ + if (!rqd->ppa_list) + return; + + nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list); +} +EXPORT_SYMBOL(nvm_free_rqd_ppalist); + +int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa) +{ + struct nvm_rq rqd; + int ret; + + if (!dev->ops->erase_block) + return 0; + + memset(&rqd, 0, sizeof(struct nvm_rq)); + + ret = nvm_set_rqd_ppalist(dev, &rqd, &ppa, 1); + if (ret) + return ret; + nvm_generic_to_addr_mode(dev, &rqd); ret = dev->ops->erase_block(dev, &rqd); - if (plane_cnt) - nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list); + nvm_free_rqd_ppalist(dev, &rqd); return ret; } diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index c228dbc803bf..2fd6871ac7f5 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -429,6 +429,9 @@ extern void nvm_unregister(char *); extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *); extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *); extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); +extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, + struct ppa_addr *, int); +extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); #else /* CONFIG_NVM */ -- cgit v1.2.3 From 91276162de9476b8ff32d9452e849210e5dd09e9 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:21 +0100 Subject: lightnvm: refactor end_io functions for sync MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit To implement sync I/O support within the LightNVM core, the end_io functions are refactored to take an end_io function pointer instead of testing for initialized media manager, followed by calling its end_io function. Sync I/O can then be implemented using a callback that signal I/O completion. This is similar to the logic found in blk_to_execute_io(). By implementing it this way, the underlying device I/Os submission logic is abstracted away from core, targets, and media managers. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/block/null_blk.c | 3 +-- drivers/lightnvm/core.c | 16 ++++++++++++++++ drivers/lightnvm/gennvm.c | 34 ++++++++++++++-------------------- drivers/lightnvm/rrpc.c | 6 ++---- drivers/nvme/host/lightnvm.c | 5 +---- include/linux/lightnvm.h | 12 ++++++++---- 6 files changed, 42 insertions(+), 34 deletions(-) (limited to 'include') diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 09e3c0d87ecc..e6cad40a248f 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -436,9 +436,8 @@ static void null_del_dev(struct nullb *nullb) static void null_lnvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; - struct nvm_dev *dev = rqd->dev; - dev->mt->end_io(rqd, error); + nvm_end_io(rqd, error); blk_put_request(rq); } diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 081b0f59b773..fa1a052c4737 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -27,6 +27,7 @@ #include #include #include +#include #include static LIST_HEAD(nvm_targets); @@ -288,6 +289,21 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa) } EXPORT_SYMBOL(nvm_erase_ppa); +void nvm_end_io(struct nvm_rq *rqd, int error) +{ + rqd->end_io(rqd, error); +} +EXPORT_SYMBOL(nvm_end_io); + +static void nvm_end_io_sync(struct nvm_rq *rqd, int errors) +{ + struct completion *waiting = rqd->wait; + + rqd->wait = NULL; + + complete(waiting); +} + static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 373be72816bd..12ddcaa343e9 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -317,18 +317,6 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) spin_unlock(&vlun->lock); } -static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) -{ - if (!dev->ops->submit_io) - return -ENODEV; - - /* Convert address space */ - nvm_generic_to_addr_mode(dev, rqd); - - rqd->dev = dev; - return dev->ops->submit_io(dev, rqd); -} - static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, int type) { @@ -375,25 +363,32 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); } -static int gennvm_end_io(struct nvm_rq *rqd, int error) +static void gennvm_end_io(struct nvm_rq *rqd, int error) { struct nvm_tgt_instance *ins = rqd->ins; - int ret = 0; switch (error) { case NVM_RSP_SUCCESS: - break; case NVM_RSP_ERR_EMPTYPAGE: break; case NVM_RSP_ERR_FAILWRITE: gennvm_mark_blk_bad(rqd->dev, rqd); - default: - ret++; } - ret += ins->tt->end_io(rqd, error); + ins->tt->end_io(rqd, error); +} - return ret; +static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) +{ + if (!dev->ops->submit_io) + return -ENODEV; + + /* Convert address space */ + nvm_generic_to_addr_mode(dev, rqd); + + rqd->dev = dev; + rqd->end_io = gennvm_end_io; + return 
dev->ops->submit_io(dev, rqd); } static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, @@ -442,7 +437,6 @@ static struct nvmm_type gennvm = { .put_blk = gennvm_put_blk, .submit_io = gennvm_submit_io, - .end_io = gennvm_end_io, .erase_blk = gennvm_erase_blk, .get_lun = gennvm_get_lun, diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 748cab499580..661c6f370f5a 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -642,7 +642,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, } } -static int rrpc_end_io(struct nvm_rq *rqd, int error) +static void rrpc_end_io(struct nvm_rq *rqd, int error) { struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); @@ -655,7 +655,7 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error) bio_put(rqd->bio); if (rrqd->flags & NVM_IOTYPE_GC) - return 0; + return; rrpc_unlock_rq(rrpc, rqd); @@ -665,8 +665,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error) nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata); mempool_free(rqd, rrpc->rq_pool); - - return 0; } static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio, diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 15f2acb4d5cd..1d1830e2ee10 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -453,11 +453,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd, static void nvme_nvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; - struct nvm_dev *dev = rqd->dev; - if (dev->mt && dev->mt->end_io(rqd, error)) - pr_err("nvme: err status: %x result: %lx\n", - rq->errors, (unsigned long)rq->special); + nvm_end_io(rqd, error); kfree(rq->cmd); blk_mq_free_request(rq); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 2fd6871ac7f5..9c9fe9ca0441 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -148,6 +148,9 @@ struct ppa_addr { }; }; +struct nvm_rq; +typedef void (nvm_end_io_fn)(struct nvm_rq *, int); + struct nvm_rq { struct nvm_tgt_instance *ins; struct nvm_dev *dev; @@ -164,6 +167,9 @@ struct nvm_rq { void *metadata; dma_addr_t dma_metadata; + struct completion *wait; + nvm_end_io_fn *end_io; + uint8_t opcode; uint16_t nr_pages; uint16_t flags; @@ -347,7 +353,6 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); -typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int); typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); typedef void (nvm_tgt_exit_fn)(void *); @@ -358,7 +363,7 @@ struct nvm_tgt_type { /* target entry points */ nvm_tgt_make_rq_fn *make_rq; nvm_tgt_capacity_fn *capacity; - nvm_tgt_end_io_fn *end_io; + nvm_end_io_fn *end_io; /* module-specific init/teardown */ nvm_tgt_init_fn *init; @@ -383,7 +388,6 @@ typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *); typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); -typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, unsigned long); typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); @@ -404,7 +408,6 @@ struct nvmm_type { nvmm_flush_blk_fn 
*flush_blk; nvmm_submit_io_fn *submit_io; - nvmm_end_io_fn *end_io; nvmm_erase_blk_fn *erase_blk; /* Configuration management */ @@ -434,6 +437,7 @@ extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); +extern void nvm_end_io(struct nvm_rq *, int); #else /* CONFIG_NVM */ struct nvm_dev_ops; -- cgit v1.2.3 From 81e681d3f7424fc2f03b6269e15c63131473c98f Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:28 +0100 Subject: lightnvm: support multiple ppas in nvm_erase_ppa MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sometimes a user want to erase multiple PPAs at the same time. Extend nvm_erase_ppa to take multiple ppas and number of ppas to be erased. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 4 ++-- drivers/lightnvm/gennvm.c | 2 +- include/linux/lightnvm.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 0c8f42fc5f01..cd674af3d17d 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -265,7 +265,7 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd) } EXPORT_SYMBOL(nvm_free_rqd_ppalist); -int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa) +int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas) { struct nvm_rq rqd; int ret; @@ -275,7 +275,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa) memset(&rqd, 0, sizeof(struct nvm_rq)); - ret = nvm_set_rqd_ppalist(dev, &rqd, &ppa, 1); + ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas); if (ret) return ret; diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 12ddcaa343e9..262da6dd9056 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -396,7 +396,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk, { struct ppa_addr addr = block_to_ppa(dev, blk); - return nvm_erase_ppa(dev, addr); + return nvm_erase_ppa(dev, &addr, 1); } static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 9c9fe9ca0441..a83298f62122 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -435,7 +435,7 @@ extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *); extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *, struct ppa_addr *, int); extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); -extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr); +extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); extern void nvm_end_io(struct nvm_rq *, int); #else /* CONFIG_NVM */ -- cgit v1.2.3 From 72d256ecc5d0c8cbcc0bd5c6d983b434df556cb4 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:29 +0100 Subject: lightnvm: move rq->error to nvm_rq->error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of passing request error into the LightNVM modules, incorporate it into the nvm_rq. 
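Concretely, nvm_end_io() now records the status on the request before invoking the completion callback:

	void nvm_end_io(struct nvm_rq *rqd, int error)
	{
		rqd->error = error;
		rqd->end_io(rqd);
	}

and the end_io handlers of targets and media managers read rqd->error instead of receiving it as an argument.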
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 3 ++- drivers/lightnvm/gennvm.c | 6 +++--- drivers/lightnvm/rrpc.c | 2 +- include/linux/lightnvm.h | 4 +++- 4 files changed, 9 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index cd674af3d17d..dad84ddbefb4 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -291,7 +291,8 @@ EXPORT_SYMBOL(nvm_erase_ppa); void nvm_end_io(struct nvm_rq *rqd, int error) { - rqd->end_io(rqd, error); + rqd->error = error; + rqd->end_io(rqd); } EXPORT_SYMBOL(nvm_end_io); diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 262da6dd9056..4c15846b327f 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -363,11 +363,11 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); } -static void gennvm_end_io(struct nvm_rq *rqd, int error) +static void gennvm_end_io(struct nvm_rq *rqd) { struct nvm_tgt_instance *ins = rqd->ins; - switch (error) { + switch (rqd->error) { case NVM_RSP_SUCCESS: case NVM_RSP_ERR_EMPTYPAGE: break; @@ -375,7 +375,7 @@ static void gennvm_end_io(struct nvm_rq *rqd, int error) gennvm_mark_blk_bad(rqd->dev, rqd); } - ins->tt->end_io(rqd, error); + ins->tt->end_io(rqd); } static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index ec7aacf78f2f..9a5d94007ec0 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -658,7 +658,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, } } -static void rrpc_end_io(struct nvm_rq *rqd, int error) +static void rrpc_end_io(struct nvm_rq *rqd) { struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance); struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index a83298f62122..9acc71a9a47f 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -149,7 +149,7 @@ struct ppa_addr { }; struct nvm_rq; -typedef void (nvm_end_io_fn)(struct nvm_rq *, int); +typedef void (nvm_end_io_fn)(struct nvm_rq *); struct nvm_rq { struct nvm_tgt_instance *ins; @@ -173,6 +173,8 @@ struct nvm_rq { uint8_t opcode; uint16_t nr_pages; uint16_t flags; + + int error; }; static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) -- cgit v1.2.3 From 09719b62fdab031e39b39a6470364a372abdf3f4 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:30 +0100 Subject: lightnvm: introduce nvm_submit_ppa MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Internal logic in both the core and the media managers does not have a backing bio for issuing I/Os. Introduce nvm_submit_ppa to allow raw I/Os to be submitted to the underlying device driver. The function takes the device, the PPAs, a data buffer and its length, and submits the I/O synchronously to the device. The return value can therefore be used to detect errors for the issued I/O.
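For example, a caller can read a single page synchronously into a kernel buffer along these lines (illustrative sketch only; ppa, len and the zero flags value are assumptions of the example):

	void *buf = kmalloc(len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf, len);
	if (ret)
		pr_err("nvm: sync read failed (%d)\n", ret);

	kfree(buf);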
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 45 ++++++++++++++++++++++++++++++++++++++++++++- include/linux/lightnvm.h | 2 ++ 2 files changed, 46 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index dad84ddbefb4..dc83e010d084 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -296,7 +296,7 @@ void nvm_end_io(struct nvm_rq *rqd, int error) } EXPORT_SYMBOL(nvm_end_io); -static void nvm_end_io_sync(struct nvm_rq *rqd, int errors) +static void nvm_end_io_sync(struct nvm_rq *rqd) { struct completion *waiting = rqd->wait; @@ -305,6 +305,49 @@ static void nvm_end_io_sync(struct nvm_rq *rqd, int errors) complete(waiting); } +int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, + int opcode, int flags, void *buf, int len) +{ + DECLARE_COMPLETION_ONSTACK(wait); + struct nvm_rq rqd; + struct bio *bio; + int ret; + unsigned long hang_check; + + bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL); + if (IS_ERR_OR_NULL(bio)) + return -ENOMEM; + + memset(&rqd, 0, sizeof(struct nvm_rq)); + ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas); + if (ret) { + bio_put(bio); + return ret; + } + + rqd.opcode = opcode; + rqd.bio = bio; + rqd.wait = &wait; + rqd.dev = dev; + rqd.end_io = nvm_end_io_sync; + rqd.flags = flags; + nvm_generic_to_addr_mode(dev, &rqd); + + ret = dev->ops->submit_io(dev, &rqd); + + /* Prevent hang_check timer from firing at us during very long I/O */ + hang_check = sysctl_hung_task_timeout_secs; + if (hang_check) + while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2))); + else + wait_for_completion_io(&wait); + + nvm_free_rqd_ppalist(dev, &rqd); + + return rqd.error; +} +EXPORT_SYMBOL(nvm_submit_ppa); + static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 9acc71a9a47f..b7001481e207 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -440,6 +440,8 @@ extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *); extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int); extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); extern void nvm_end_io(struct nvm_rq *, int); +extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, + void *, int); #else /* CONFIG_NVM */ struct nvm_dev_ops; -- cgit v1.2.3 From b5d4acd4cbf5029a2616084d9e9f392046d53a37 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:32 +0100 Subject: lightnvm: fix missing grown bad block type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The get/set bad block interface defines good block, factory bad block, grown bad block, device reserved block, and host reserved block. Unfortunately the grown bad block was missing, leaving the offsets wrong for device and host side reserved blocks. This patch adds the missing type and corrects the offsets. 
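With the fix, the block types are defined as:

	NVM_BLK_T_FREE		= 0x0
	NVM_BLK_T_BAD		= 0x1
	NVM_BLK_T_GRWN_BAD	= 0x2
	NVM_BLK_T_DEV		= 0x4
	NVM_BLK_T_HOST		= 0x8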
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- include/linux/lightnvm.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index b7001481e207..4a700a137460 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -58,8 +58,9 @@ enum { /* Block Types */ NVM_BLK_T_FREE = 0x0, NVM_BLK_T_BAD = 0x1, - NVM_BLK_T_DEV = 0x2, - NVM_BLK_T_HOST = 0x4, + NVM_BLK_T_GRWN_BAD = 0x2, + NVM_BLK_T_DEV = 0x4, + NVM_BLK_T_HOST = 0x8, }; struct nvm_id_group { -- cgit v1.2.3 From ff0e498bfa185fad5e86c4c7a2db4f9648d2344f Mon Sep 17 00:00:00 2001 From: Javier González Date: Tue, 12 Jan 2016 07:49:33 +0100 Subject: lightnvm: manage open and closed blocks separately MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LightNVM targets need to know the state of the flash block when doing flash optimizations. An example is implementing a write buffer to respect the flash page size. Currently, block state is not accounted for; the media manager only differentiates among free, bad and in-use blocks. This patch adds the logic in the generic media manager to enable targets manage blocks into open and close separately, and it implements such management in rrpc. It also adds a set of flags to describe the state of the block (open, closed, free, bad). In order to avoid taking two locks (nvm_lun and rrpc_lun) consecutively, we introduce lockless get_/put_block primitives so that the open and close list locks and future common logic is handled within the nvm_lun lock. Signed-off-by: Javier González Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 14 +++++++ drivers/lightnvm/gennvm.c | 99 +++++++++++++++++++++++++++++------------------ drivers/lightnvm/rrpc.c | 38 +++++++++++++++--- drivers/lightnvm/rrpc.h | 12 +++++- include/linux/lightnvm.h | 25 ++++++++++-- 5 files changed, 142 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index dc83e010d084..e5e396338319 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -167,6 +167,20 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name) return NULL; } +struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun, + unsigned long flags) +{ + return dev->mt->get_blk_unlocked(dev, lun, flags); +} +EXPORT_SYMBOL(nvm_get_blk_unlocked); + +/* Assumes that all valid pages have already been moved on release to bm */ +void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk) +{ + return dev->mt->put_blk_unlocked(dev, blk); +} +EXPORT_SYMBOL(nvm_put_blk_unlocked); + struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun, unsigned long flags) { diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index 4c15846b327f..7fb725b16148 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c @@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) lun->vlun.lun_id = i % dev->luns_per_chnl; lun->vlun.chnl_id = i / dev->luns_per_chnl; lun->vlun.nr_free_blocks = dev->blks_per_lun; - lun->vlun.nr_inuse_blocks = 0; + lun->vlun.nr_open_blocks = 0; + lun->vlun.nr_closed_blocks = 0; lun->vlun.nr_bad_blocks = 0; } return 0; @@ -134,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) pba = pba - (dev->sec_per_lun * lun_id); blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)]; - 
if (!blk->type) { + if (!blk->state) { /* at this point, we don't know anything about the * block. It's up to the FTL on top to re-etablish the - * block state + * block state. The block is assumed to be open. */ list_move_tail(&blk->list, &lun->used_list); - blk->type = 1; + blk->state = NVM_BLK_ST_OPEN; lun->vlun.nr_free_blocks--; - lun->vlun.nr_inuse_blocks++; + lun->vlun.nr_open_blocks++; } } @@ -256,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev) module_put(THIS_MODULE); } -static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, +static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *vlun, unsigned long flags) { struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); struct nvm_block *blk = NULL; int is_gc = flags & NVM_IOTYPE_GC; - spin_lock(&vlun->lock); + assert_spin_locked(&vlun->lock); if (list_empty(&lun->free_list)) { pr_err_ratelimited("gennvm: lun %u have no free pages available", @@ -276,44 +277,63 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, blk = list_first_entry(&lun->free_list, struct nvm_block, list); list_move_tail(&blk->list, &lun->used_list); - blk->type = 1; + blk->state = NVM_BLK_ST_OPEN; lun->vlun.nr_free_blocks--; - lun->vlun.nr_inuse_blocks++; + lun->vlun.nr_open_blocks++; out: + return blk; +} + +static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, + struct nvm_lun *vlun, unsigned long flags) +{ + struct nvm_block *blk; + + spin_lock(&vlun->lock); + blk = gennvm_get_blk_unlocked(dev, vlun, flags); spin_unlock(&vlun->lock); return blk; } -static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) +static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk) { struct nvm_lun *vlun = blk->lun; struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun); - spin_lock(&vlun->lock); + assert_spin_locked(&vlun->lock); - switch (blk->type) { - case 1: + if (blk->state & NVM_BLK_ST_OPEN) { list_move_tail(&blk->list, &lun->free_list); + lun->vlun.nr_open_blocks--; lun->vlun.nr_free_blocks++; - lun->vlun.nr_inuse_blocks--; - blk->type = 0; - break; - case 2: + blk->state = NVM_BLK_ST_FREE; + } else if (blk->state & NVM_BLK_ST_CLOSED) { + list_move_tail(&blk->list, &lun->free_list); + lun->vlun.nr_closed_blocks--; + lun->vlun.nr_free_blocks++; + blk->state = NVM_BLK_ST_FREE; + } else if (blk->state & NVM_BLK_ST_BAD) { list_move_tail(&blk->list, &lun->bb_list); lun->vlun.nr_bad_blocks++; - lun->vlun.nr_inuse_blocks--; - break; - default: + blk->state = NVM_BLK_ST_BAD; + } else { WARN_ON_ONCE(1); pr_err("gennvm: erroneous block type (%lu -> %u)\n", - blk->id, blk->type); + blk->id, blk->state); list_move_tail(&blk->list, &lun->bb_list); lun->vlun.nr_bad_blocks++; - lun->vlun.nr_inuse_blocks--; + blk->state = NVM_BLK_ST_BAD; } +} +static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) +{ + struct nvm_lun *vlun = blk->lun; + + spin_lock(&vlun->lock); + gennvm_put_blk_unlocked(dev, blk); spin_unlock(&vlun->lock); } @@ -339,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa, blk = &lun->vlun.blocks[ppa->g.blk]; /* will be moved to bb list on put_blk from target */ - blk->type = type; + blk->state = type; } /* mark block bad. It is expected the target recover from the error. 
*/ @@ -358,9 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) /* look up blocks and mark them as bad */ if (rqd->nr_pages > 1) for (i = 0; i < rqd->nr_pages; i++) - gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2); + gennvm_blk_set_type(dev, &rqd->ppa_list[i], + NVM_BLK_ST_BAD); else - gennvm_blk_set_type(dev, &rqd->ppa_addr, 2); + gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD); } static void gennvm_end_io(struct nvm_rq *rqd) @@ -416,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev) gennvm_for_each_lun(gn, lun, i) { spin_lock(&lun->vlun.lock); - pr_info("%s: lun%8u\t%u\t%u\t%u\n", + pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n", dev->name, i, lun->vlun.nr_free_blocks, - lun->vlun.nr_inuse_blocks, + lun->vlun.nr_open_blocks, + lun->vlun.nr_closed_blocks, lun->vlun.nr_bad_blocks); spin_unlock(&lun->vlun.lock); @@ -427,20 +449,23 @@ static void gennvm_lun_info_print(struct nvm_dev *dev) } static struct nvmm_type gennvm = { - .name = "gennvm", - .version = {0, 1, 0}, + .name = "gennvm", + .version = {0, 1, 0}, + + .register_mgr = gennvm_register, + .unregister_mgr = gennvm_unregister, - .register_mgr = gennvm_register, - .unregister_mgr = gennvm_unregister, + .get_blk_unlocked = gennvm_get_blk_unlocked, + .put_blk_unlocked = gennvm_put_blk_unlocked, - .get_blk = gennvm_get_blk, - .put_blk = gennvm_put_blk, + .get_blk = gennvm_get_blk, + .put_blk = gennvm_put_blk, - .submit_io = gennvm_submit_io, - .erase_blk = gennvm_erase_blk, + .submit_io = gennvm_submit_io, + .erase_blk = gennvm_erase_blk, - .get_lun = gennvm_get_lun, - .lun_info_print = gennvm_lun_info_print, + .get_lun = gennvm_get_lun, + .lun_info_print = gennvm_lun_info_print, }; static int __init gennvm_module_init(void) diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 280350c24cec..d8c75958ced3 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c @@ -179,16 +179,23 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk) static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, unsigned long flags) { + struct nvm_lun *lun = rlun->parent; struct nvm_block *blk; struct rrpc_block *rblk; - blk = nvm_get_blk(rrpc->dev, rlun->parent, flags); - if (!blk) + spin_lock(&lun->lock); + blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags); + if (!blk) { + pr_err("nvm: rrpc: cannot get new block from media manager\n"); + spin_unlock(&lun->lock); return NULL; + } rblk = &rlun->blocks[blk->id]; - blk->priv = rblk; + list_add_tail(&rblk->list, &rlun->open_list); + spin_unlock(&lun->lock); + blk->priv = rblk; bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk); rblk->next_page = 0; rblk->nr_invalid_pages = 0; @@ -199,7 +206,13 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun, static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk) { - nvm_put_blk(rrpc->dev, rblk->parent); + struct rrpc_lun *rlun = rblk->rlun; + struct nvm_lun *lun = rlun->parent; + + spin_lock(&lun->lock); + nvm_put_blk_unlocked(rrpc->dev, rblk->parent); + list_del(&rblk->list); + spin_unlock(&lun->lock); } static void rrpc_put_blks(struct rrpc *rrpc) @@ -653,8 +666,20 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd, lun = rblk->parent->lun; cmnt_size = atomic_inc_return(&rblk->data_cmnt_size); - if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) + if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) { + struct nvm_block *blk = rblk->parent; + struct rrpc_lun *rlun = 
rblk->rlun; + + spin_lock(&lun->lock); + lun->nr_open_blocks--; + lun->nr_closed_blocks++; + blk->state &= ~NVM_BLK_ST_OPEN; + blk->state |= NVM_BLK_ST_CLOSED; + list_move_tail(&rblk->list, &rlun->closed_list); + spin_unlock(&lun->lock); + rrpc_run_gc(rrpc, rblk); + } } } @@ -1134,6 +1159,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end) rlun->rrpc = rrpc; rlun->parent = lun; INIT_LIST_HEAD(&rlun->prio_list); + INIT_LIST_HEAD(&rlun->open_list); + INIT_LIST_HEAD(&rlun->closed_list); + INIT_WORK(&rlun->ws_gc, rrpc_lun_gc); spin_lock_init(&rlun->lock); diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h index 7c5fa4dd9722..ef13ac7700c8 100644 --- a/drivers/lightnvm/rrpc.h +++ b/drivers/lightnvm/rrpc.h @@ -56,6 +56,7 @@ struct rrpc_block { struct nvm_block *parent; struct rrpc_lun *rlun; struct list_head prio; + struct list_head list; #define MAX_INVALID_PAGES_STORAGE 8 /* Bitmap for invalid page intries */ @@ -74,7 +75,16 @@ struct rrpc_lun { struct nvm_lun *parent; struct rrpc_block *cur, *gc_cur; struct rrpc_block *blocks; /* Reference to block allocation */ - struct list_head prio_list; /* Blocks that may be GC'ed */ + + struct list_head prio_list; /* Blocks that may be GC'ed */ + struct list_head open_list; /* In-use open blocks. These are blocks + * that can be both written to and read + * from + */ + struct list_head closed_list; /* In-use closed blocks. These are + * blocks that can _only_ be read from + */ + struct work_struct ws_gc; spinlock_t lock; diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 4a700a137460..aa35907714b8 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -229,12 +229,25 @@ struct nvm_lun { int lun_id; int chnl_id; - unsigned int nr_inuse_blocks; /* Number of used blocks */ + /* It is up to the target to mark blocks as closed. 
If the target does + * not do it, all blocks are marked as open, and nr_open_blocks + * represents the number of blocks in use + */ + unsigned int nr_open_blocks; /* Number of used, writable blocks */ + unsigned int nr_closed_blocks; /* Number of used, read-only blocks */ unsigned int nr_free_blocks; /* Number of unused blocks */ unsigned int nr_bad_blocks; /* Number of bad blocks */ - struct nvm_block *blocks; spinlock_t lock; + + struct nvm_block *blocks; +}; + +enum { + NVM_BLK_ST_FREE = 0x1, /* Free block */ + NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */ + NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */ + NVM_BLK_ST_BAD = 0x8, /* Bad block */ }; struct nvm_block { @@ -243,7 +256,7 @@ struct nvm_block { unsigned long id; void *priv; - int type; + int state; }; struct nvm_dev { @@ -404,6 +417,8 @@ struct nvmm_type { nvmm_unregister_fn *unregister_mgr; /* Block administration callbacks */ + nvmm_get_blk_fn *get_blk_unlocked; + nvmm_put_blk_fn *put_blk_unlocked; nvmm_get_blk_fn *get_blk; nvmm_put_blk_fn *put_blk; nvmm_open_blk_fn *open_blk; @@ -424,6 +439,10 @@ struct nvmm_type { extern int nvm_register_mgr(struct nvmm_type *); extern void nvm_unregister_mgr(struct nvmm_type *); +extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *, + struct nvm_lun *, unsigned long); +extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *); + extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *, unsigned long); extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *); -- cgit v1.2.3 From f9a9995072904f2d67d649545f17f81e00f4985e Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:34 +0100 Subject: lightnvm: add mccap support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some flash media has extended capabilities, such as programming SLC pages on MLC/TLC flash, erase/program suspend, scramble and encryption. MCCAP is introduced to detect support for these capabilities in the command set. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 1 + include/linux/lightnvm.h | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index e5e396338319..5199c12fead0 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -376,6 +376,7 @@ static int nvm_core_init(struct nvm_dev *dev) dev->sec_size = grp->csecs; dev->oob_size = grp->sos; dev->sec_per_pg = grp->fpg_sz / grp->csecs; + dev->mccap = grp->mccap; memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); dev->plane_mode = NVM_PLANE_SINGLE; diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index aa35907714b8..b90d28344e3d 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -61,6 +61,12 @@ enum { NVM_BLK_T_GRWN_BAD = 0x2, NVM_BLK_T_DEV = 0x4, NVM_BLK_T_HOST = 0x8, + + /* Memory capabilities */ + NVM_ID_CAP_SLC = 0x1, + NVM_ID_CAP_CMD_SUSPEND = 0x2, + NVM_ID_CAP_SCRAMBLE = 0x4, + NVM_ID_CAP_ENCRYPT = 0x8, }; struct nvm_id_group { @@ -278,6 +284,7 @@ struct nvm_dev { int blks_per_lun; int sec_size; int oob_size; + int mccap; struct nvm_addr_format ppaf; /* Calculated/Cached values. 
These do not reflect the actual usable -- cgit v1.2.3 From ca5927e7ab5307965104ca58bbb29d110b1d4545 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:35 +0100 Subject: lightnvm: introduce mlc lower page table mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NAND MLC memories have both lower and upper pages. When programming, both of these must be written, before data can be read. However, these lower and upper pages might not placed at even and odd flash pages, but can be skipped. Therefore each flash memory has its lower pages defined, which can then be used when programming and to know when padding are necessary. This patch implements the lower page definition in the specification, and exposes it through a simple lookup table at dev->lptbl. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 61 +++++++++++++++++++++++++++++++++++++++++++- drivers/nvme/host/lightnvm.c | 22 +++++++++++++++- include/linux/lightnvm.h | 20 +++++++++++++++ 3 files changed, 101 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 5199c12fead0..1f302cc73e0b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -362,6 +362,51 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas, } EXPORT_SYMBOL(nvm_submit_ppa); +static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp) +{ + int i; + + dev->lps_per_blk = dev->pgs_per_blk; + dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL); + if (!dev->lptbl) + return -ENOMEM; + + /* Just a linear array */ + for (i = 0; i < dev->lps_per_blk; i++) + dev->lptbl[i] = i; + + return 0; +} + +static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp) +{ + int i, p; + struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc; + + if (!mlc->num_pairs) + return 0; + + dev->lps_per_blk = mlc->num_pairs; + dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL); + if (!dev->lptbl) + return -ENOMEM; + + /* The lower page table encoding consists of a list of bytes, where each + * has a lower and an upper half. 
The first half byte maintains the + * increment value and every value after is an offset added to the + * previous incrementation value */ + dev->lptbl[0] = mlc->pairs[0] & 0xF; + for (i = 1; i < dev->lps_per_blk; i++) { + p = mlc->pairs[i >> 1]; + if (i & 0x1) /* upper */ + dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4); + else /* lower */ + dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF); + } + + return 0; +} + static int nvm_core_init(struct nvm_dev *dev) { struct nvm_id *id = &dev->identity; @@ -387,11 +432,23 @@ static int nvm_core_init(struct nvm_dev *dev) return -EINVAL; } - if (grp->fmtype != 0 && grp->fmtype != 1) { + switch (grp->fmtype) { + case NVM_ID_FMTYPE_SLC: + if (nvm_init_slc_tbl(dev, grp)) + return -ENOMEM; + break; + case NVM_ID_FMTYPE_MLC: + if (nvm_init_mlc_tbl(dev, grp)) + return -ENOMEM; + break; + default: pr_err("nvm: flash type not supported\n"); return -EINVAL; } + if (!dev->lps_per_blk) + pr_info("nvm: lower page programming table missing\n"); + if (grp->mpos & 0x020202) dev->plane_mode = NVM_PLANE_DOUBLE; if (grp->mpos & 0x040404) @@ -420,6 +477,8 @@ static void nvm_free(struct nvm_dev *dev) if (dev->mt) dev->mt->unregister_mgr(dev); + + kfree(dev->lptbl); } static int nvm_init(struct nvm_dev *dev) diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index b112f022dee2..215c02512811 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -146,6 +146,16 @@ struct nvme_nvm_command { }; }; +struct nvme_nvm_lp_mlc { + __u16 num_pairs; + __u8 pairs[886]; +}; + +struct nvme_nvm_lp_tbl { + __u8 id[8]; + struct nvme_nvm_lp_mlc mlc; +}; + struct nvme_nvm_id_group { __u8 mtype; __u8 fmtype; @@ -169,7 +179,8 @@ struct nvme_nvm_id_group { __le32 mpos; __le32 mccap; __le16 cpar; - __u8 reserved[906]; + __u8 reserved[10]; + struct nvme_nvm_lp_tbl lptbl; } __packed; struct nvme_nvm_addr_format { @@ -266,6 +277,15 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) dst->mccap = le32_to_cpu(src->mccap); dst->cpar = le16_to_cpu(src->cpar); + + if (dst->fmtype == NVM_ID_FMTYPE_MLC) { + memcpy(dst->lptbl.id, src->lptbl.id, 8); + dst->lptbl.mlc.num_pairs = + le16_to_cpu(src->lptbl.mlc.num_pairs); + /* 4 bits per pair */ + memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, + dst->lptbl.mlc.num_pairs >> 1); + } } return 0; diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index b90d28344e3d..678fd91a1f99 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -67,6 +67,20 @@ enum { NVM_ID_CAP_CMD_SUSPEND = 0x2, NVM_ID_CAP_SCRAMBLE = 0x4, NVM_ID_CAP_ENCRYPT = 0x8, + + /* Memory types */ + NVM_ID_FMTYPE_SLC = 0, + NVM_ID_FMTYPE_MLC = 1, +}; + +struct nvm_id_lp_mlc { + u16 num_pairs; + u8 pairs[886]; +}; + +struct nvm_id_lp_tbl { + __u8 id[8]; + struct nvm_id_lp_mlc mlc; }; struct nvm_id_group { @@ -89,6 +103,8 @@ struct nvm_id_group { u32 mpos; u32 mccap; u16 cpar; + + struct nvm_id_lp_tbl lptbl; }; struct nvm_addr_format { @@ -297,6 +313,10 @@ struct nvm_dev { int sec_per_blk; int sec_per_lun; + /* lower page table */ + int lps_per_blk; + int *lptbl; + unsigned long total_pages; unsigned long total_blocks; int nr_luns; -- cgit v1.2.3 From e3eb3799f7e0d0924ceeba672ab271865de2802d Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:36 +0100 Subject: lightnvm: core on-disk initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An Open-Channel SSD shall be initialized before use. 
To initialize, we define an on-disk format, that keeps a small set of metadata to bring up the media manager on top of the device. The initial step is introduced to allow a user to format the disks for a given media manager. During format, a system block is stored on one to three separate luns on the device. Each lun has the system block duplicated. During initialization, the system block can be retrieved and the appropriate media manager can initialized. The on-disk format currently covers (struct nvm_system_block): - Magic value "NVMS". - Monotonic increasing sequence number. - The physical block erase count. - Version of the system block format. - Media manager type. - Media manager superblock physical address. The interface provides three functions to manage the system block: int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *) int nvm_get_sysblock(struct nvm *dev, struct nvm_sb_info *) int nvm_update_sysblock(struct nvm *dev, struct nvm_sb_info *) Each implement a part of the logic to manage the system block. The initialization creates the first system blocks and mark them on the device. Get retrieves the latest system block by scanning all pages in the associated system blocks. The update sysblock writes new metadata and allocates new block if necessary. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/Makefile | 2 +- drivers/lightnvm/core.c | 1 + drivers/lightnvm/sysblk.c | 562 ++++++++++++++++++++++++++++++++++++++++++ include/linux/lightnvm.h | 35 +++ include/uapi/linux/lightnvm.h | 1 + 5 files changed, 600 insertions(+), 1 deletion(-) create mode 100644 drivers/lightnvm/sysblk.c (limited to 'include') diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile index 7e0f42acb737..a7a0a22cf1a5 100644 --- a/drivers/lightnvm/Makefile +++ b/drivers/lightnvm/Makefile @@ -2,6 +2,6 @@ # Makefile for Open-Channel SSDs. # -obj-$(CONFIG_NVM) := core.o +obj-$(CONFIG_NVM) := core.o sysblk.o obj-$(CONFIG_NVM_GENNVM) += gennvm.o obj-$(CONFIG_NVM_RRPC) += rrpc.o diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 1f302cc73e0b..73b8ae1dafc4 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -466,6 +466,7 @@ static int nvm_core_init(struct nvm_dev *dev) dev->nr_chnls; dev->total_pages = dev->total_blocks * dev->pgs_per_blk; INIT_LIST_HEAD(&dev->online_targets); + mutex_init(&dev->mlock); return 0; } diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c new file mode 100644 index 000000000000..b8489f425b05 --- /dev/null +++ b/drivers/lightnvm/sysblk.c @@ -0,0 +1,562 @@ +/* + * Copyright (C) 2015 Matias Bjorling. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. 
+ * + */ + +#include + +#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */ +#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases + * enables ~1.5M updates per sysblk unit + */ + +struct sysblk_scan { + /* A row is a collection of flash blocks for a system block. */ + int nr_rows; + int row; + int act_blk[MAX_SYSBLKS]; + + int nr_ppas; + struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */ +}; + +static inline int scan_ppa_idx(int row, int blkid) +{ + return (row * MAX_BLKS_PR_SYSBLK) + blkid; +} + +void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb) +{ + info->seqnr = be32_to_cpu(sb->seqnr); + info->erase_cnt = be32_to_cpu(sb->erase_cnt); + info->version = be16_to_cpu(sb->version); + strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN); + info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa); +} + +void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info) +{ + sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC); + sb->seqnr = cpu_to_be32(info->seqnr); + sb->erase_cnt = cpu_to_be32(info->erase_cnt); + sb->version = cpu_to_be16(info->version); + strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN); + sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa); +} + +static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas) +{ + int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls); + int i; + + for (i = 0; i < nr_rows; i++) + sysblk_ppas[i].ppa = 0; + + /* if possible, place sysblk at first channel, middle channel and last + * channel of the device. If not, create only one or two sys blocks + */ + switch (dev->nr_chnls) { + case 2: + sysblk_ppas[1].g.ch = 1; + /* fall-through */ + case 1: + sysblk_ppas[0].g.ch = 0; + break; + default: + sysblk_ppas[0].g.ch = 0; + sysblk_ppas[1].g.ch = dev->nr_chnls / 2; + sysblk_ppas[2].g.ch = dev->nr_chnls - 1; + break; + } + + return nr_rows; +} + +void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s, + struct ppa_addr *sysblk_ppas) +{ + memset(s, 0, sizeof(struct sysblk_scan)); + s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas); +} + +static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, + void *private) +{ + struct sysblk_scan *s = private; + int i, nr_sysblk = 0; + + for (i = 0; i < nr_blks; i++) { + if (blks[i] != NVM_BLK_T_HOST) + continue; + + if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) { + pr_err("nvm: too many host blks\n"); + return -EINVAL; + } + + ppa.g.blk = i; + + s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa; + s->nr_ppas++; + nr_sysblk++; + } + + return 0; +} + +static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s, + struct ppa_addr *ppas, nvm_bb_update_fn *fn) +{ + struct ppa_addr dppa; + int i, ret; + + s->nr_ppas = 0; + + for (i = 0; i < s->nr_rows; i++) { + dppa = generic_to_dev_addr(dev, ppas[i]); + s->row = i; + + ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s); + if (ret) { + pr_err("nvm: failed bb tbl for ppa (%u %u)\n", + ppas[i].g.ch, + ppas[i].g.blk); + return ret; + } + } + + return ret; +} + +/* + * scans a block for latest sysblk. + * Returns: + * 0 - newer sysblk not found. PPA is updated to latest page. + * 1 - newer sysblk found and stored in *cur. PPA is updated to + * next valid page. + * <0- error. + */ +static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa, + struct nvm_system_block *sblk) +{ + struct nvm_system_block *cur; + int pg, cursz, ret, found = 0; + + /* the full buffer for a flash page is allocated. 
Only the first of it + * contains the system block information + */ + cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes; + cur = kmalloc(cursz, GFP_KERNEL); + if (!cur) + return -ENOMEM; + + /* perform linear scan through the block */ + for (pg = 0; pg < dev->lps_per_blk; pg++) { + ppa->g.pg = ppa_to_slc(dev, pg); + + ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE, + cur, cursz); + if (ret) { + if (ret == NVM_RSP_ERR_EMPTYPAGE) { + pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n", + ppa->g.ch, + ppa->g.lun, + ppa->g.blk, + ppa->g.pg); + break; + } + pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)", + ret, + ppa->g.ch, + ppa->g.lun, + ppa->g.blk, + ppa->g.pg); + break; /* if we can't read a page, continue to the + * next blk + */ + } + + if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) { + pr_debug("nvm: scan break for ppa (%u %u %u %u)\n", + ppa->g.ch, + ppa->g.lun, + ppa->g.blk, + ppa->g.pg); + break; /* last valid page already found */ + } + + if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr)) + continue; + + memcpy(sblk, cur, sizeof(struct nvm_system_block)); + found = 1; + } + + kfree(cur); + + return found; +} + +static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type) +{ + struct nvm_rq rqd; + int ret; + + if (s->nr_ppas > dev->ops->max_phys_sect) { + pr_err("nvm: unable to update all sysblocks atomically\n"); + return -EINVAL; + } + + memset(&rqd, 0, sizeof(struct nvm_rq)); + + nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas); + nvm_generic_to_addr_mode(dev, &rqd); + + ret = dev->ops->set_bb_tbl(dev, &rqd, type); + nvm_free_rqd_ppalist(dev, &rqd); + if (ret) { + pr_err("nvm: sysblk failed bb mark\n"); + return -EINVAL; + } + + return 0; +} + +static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, + void *private) +{ + struct sysblk_scan *s = private; + struct ppa_addr *sppa; + int i, blkid = 0; + + for (i = 0; i < nr_blks; i++) { + if (blks[i] == NVM_BLK_T_HOST) + return -EEXIST; + + if (blks[i] != NVM_BLK_T_FREE) + continue; + + sppa = &s->ppas[scan_ppa_idx(s->row, blkid)]; + sppa->g.ch = ppa.g.ch; + sppa->g.lun = ppa.g.lun; + sppa->g.blk = i; + s->nr_ppas++; + blkid++; + + pr_debug("nvm: use (%u %u %u) as sysblk\n", + sppa->g.ch, sppa->g.lun, sppa->g.blk); + if (blkid > MAX_BLKS_PR_SYSBLK - 1) + return 0; + } + + pr_err("nvm: sysblk failed get sysblk\n"); + return -EINVAL; +} + +static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info, + struct sysblk_scan *s) +{ + struct nvm_system_block nvmsb; + void *buf; + int i, sect, ret, bufsz; + struct ppa_addr *ppas; + + nvm_cpu_to_sysblk(&nvmsb, info); + + /* buffer for flash page */ + bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + memcpy(buf, &nvmsb, sizeof(struct nvm_system_block)); + + ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL); + if (!ppas) { + ret = -ENOMEM; + goto err; + } + + /* Write and verify */ + for (i = 0; i < s->nr_rows; i++) { + ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])]; + + pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n", + ppas[0].g.ch, + ppas[0].g.lun, + ppas[0].g.blk, + ppas[0].g.pg); + + /* Expand to all sectors within a flash page */ + if (dev->sec_per_pg > 1) { + for (sect = 1; sect < dev->sec_per_pg; sect++) { + ppas[sect].ppa = ppas[0].ppa; + ppas[sect].g.sec = sect; + } + } + + ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE, + NVM_IO_SLC_MODE, buf, bufsz); + if (ret) { + 
pr_err("nvm: sysblk failed program (%u %u %u)\n", + ppas[0].g.ch, + ppas[0].g.lun, + ppas[0].g.blk); + break; + } + + ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD, + NVM_IO_SLC_MODE, buf, bufsz); + if (ret) { + pr_err("nvm: sysblk failed read (%u %u %u)\n", + ppas[0].g.ch, + ppas[0].g.lun, + ppas[0].g.blk); + break; + } + + if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) { + pr_err("nvm: sysblk failed verify (%u %u %u)\n", + ppas[0].g.ch, + ppas[0].g.lun, + ppas[0].g.blk); + ret = -EINVAL; + break; + } + } + + kfree(ppas); +err: + kfree(buf); + + return ret; +} + +static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s) +{ + int i, ret; + unsigned long nxt_blk; + struct ppa_addr *ppa; + + for (i = 0; i < s->nr_rows; i++) { + nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK; + ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)]; + ppa->g.pg = ppa_to_slc(dev, 0); + + ret = nvm_erase_ppa(dev, ppa, 1); + if (ret) + return ret; + + s->act_blk[i] = nxt_blk; + } + + return 0; +} + +int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) +{ + struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; + struct sysblk_scan s; + struct nvm_system_block *cur; + int i, j, found = 0; + int ret = -ENOMEM; + + /* + * 1. setup sysblk locations + * 2. get bad block list + * 3. filter on host-specific (type 3) + * 4. iterate through all and find the highest seq nr. + * 5. return superblock information + */ + + if (!dev->ops->get_bb_tbl) + return -EINVAL; + + nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); + + mutex_lock(&dev->mlock); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks); + if (ret) + goto err_sysblk; + + /* no sysblocks initialized */ + if (!s.nr_ppas) + goto err_sysblk; + + cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL); + if (!cur) + goto err_sysblk; + + /* find the latest block across all sysblocks */ + for (i = 0; i < s.nr_rows; i++) { + for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) { + struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)]; + + ret = nvm_scan_block(dev, &ppa, cur); + if (ret > 0) + found = 1; + else if (ret < 0) + break; + } + } + + nvm_sysblk_to_cpu(info, cur); + + kfree(cur); +err_sysblk: + mutex_unlock(&dev->mlock); + + if (found) + return 1; + return ret; +} + +int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new) +{ + /* 1. for each latest superblock + * 2. if room + * a. write new flash page entry with the updated information + * 3. if no room + * a. find next available block on lun (linear search) + * if none, continue to next lun + * if none at all, report error. also report that it wasn't + * possible to write to all superblocks. + * c. write data to block. 
+ */ + struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; + struct sysblk_scan s; + struct nvm_system_block *cur; + int i, j, ppaidx, found = 0; + int ret = -ENOMEM; + + if (!dev->ops->get_bb_tbl) + return -EINVAL; + + nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); + + mutex_lock(&dev->mlock); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks); + if (ret) + goto err_sysblk; + + cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL); + if (!cur) + goto err_sysblk; + + /* Get the latest sysblk for each sysblk row */ + for (i = 0; i < s.nr_rows; i++) { + found = 0; + for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) { + ppaidx = scan_ppa_idx(i, j); + ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur); + if (ret > 0) { + s.act_blk[i] = j; + found = 1; + } else if (ret < 0) + break; + } + } + + if (!found) { + pr_err("nvm: no valid sysblks found to update\n"); + ret = -EINVAL; + goto err_cur; + } + + /* + * All sysblocks found. Check that they have same page id in their flash + * blocks + */ + for (i = 1; i < s.nr_rows; i++) { + struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])]; + struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])]; + + if (l.g.pg != r.g.pg) { + pr_err("nvm: sysblks not on same page. Previous update failed.\n"); + ret = -EINVAL; + goto err_cur; + } + } + + /* + * Check that there haven't been another update to the seqnr since we + * began + */ + if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) { + pr_err("nvm: seq is not sequential\n"); + ret = -EINVAL; + goto err_cur; + } + + /* + * When all pages in a block has been written, a new block is selected + * and writing is performed on the new block. + */ + if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg == + dev->lps_per_blk - 1) { + ret = nvm_prepare_new_sysblks(dev, &s); + if (ret) + goto err_cur; + } + + ret = nvm_write_and_verify(dev, new, &s); +err_cur: + kfree(cur); +err_sysblk: + mutex_unlock(&dev->mlock); + + return ret; +} + +int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info) +{ + struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; + struct sysblk_scan s; + int ret; + + /* + * 1. select master blocks and select first available blks + * 2. get bad block list + * 3. mark MAX_SYSBLKS block as host-based device allocated. + * 4. 
write and verify data to block + */ + + if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl) + return -EINVAL; + + if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) { + pr_err("nvm: memory does not support SLC access\n"); + return -EINVAL; + } + + /* Index all sysblocks and mark them as host-driven */ + nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); + + mutex_lock(&dev->mlock); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks); + if (ret) + goto err_mark; + + ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST); + if (ret) + goto err_mark; + + /* Write to the first block of each row */ + ret = nvm_write_and_verify(dev, info, &s); +err_mark: + mutex_unlock(&dev->mlock); + return ret; +} diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 678fd91a1f99..7ad22d3f0d2e 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -17,6 +17,7 @@ enum { #include #include #include +#include enum { /* HW Responsibilities */ @@ -281,6 +282,15 @@ struct nvm_block { int state; }; +/* system block cpu representation */ +struct nvm_sb_info { + unsigned long seqnr; + unsigned long erase_cnt; + unsigned int version; + char mmtype[NVM_MMTYPE_LEN]; + struct ppa_addr fs_ppa; +}; + struct nvm_dev { struct nvm_dev_ops *ops; @@ -329,6 +339,8 @@ struct nvm_dev { /* Backend device */ struct request_queue *q; char name[DISK_NAME_LEN]; + + struct mutex mlock; }; static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, @@ -394,6 +406,11 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev, return ppa; } +static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg) +{ + return dev->lptbl[slc_pg]; +} + typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int); @@ -489,6 +506,24 @@ extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *); extern void nvm_end_io(struct nvm_rq *, int); extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int, void *, int); + +/* sysblk.c */ +#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */ + +/* system block on disk representation */ +struct nvm_system_block { + __be32 magic; /* magic signature */ + __be32 seqnr; /* sequence number */ + __be32 erase_cnt; /* erase count */ + __be16 version; /* version number */ + u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */ + __be64 fs_ppa; /* PPA for media manager + * superblock */ +}; + +extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *); +extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *); +extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); #else /* CONFIG_NVM */ struct nvm_dev_ops; diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index 928f98997d8a..0171b85e7efc 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -33,6 +33,7 @@ #define NVM_TTYPE_NAME_MAX 48 #define NVM_TTYPE_MAX 63 +#define NVM_MMTYPE_LEN 8 #define NVM_CTRL_FILE "/dev/lightnvm/control" -- cgit v1.2.3 From 5569615424613aa006005f18b03a3a12738a47d7 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:37 +0100 Subject: lightnvm: introduce ioctl to initialize device MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Based on the previous patch, we now introduce an ioctl to initialize the device using nvm_init_sysblock and create the necessary system blocks. 
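For illustration only (this snippet is not part of the patch): a user-space caller could exercise the new command roughly as sketched below, assuming the NVM_DEV_INIT ioctl and struct nvm_ioctl_dev_init added by this patch, plus the uapi definitions of NVM_CTRL_FILE, DISK_NAME_LEN and NVM_MMTYPE_LEN. The helper name lnvm_dev_init is made up, and "gennvm" matches the user-space default noted below.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lightnvm.h>

/* ask the kernel to write the system blocks and record the media manager */
static int lnvm_dev_init(const char *devname)
{
	struct nvm_ioctl_dev_init init;
	int fd, ret;

	memset(&init, 0, sizeof(init));		/* flags must be zero */
	strncpy(init.dev, devname, DISK_NAME_LEN - 1);
	strncpy(init.mmtype, "gennvm", NVM_MMTYPE_LEN - 1);

	fd = open(NVM_CTRL_FILE, O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, NVM_DEV_INIT, &init);
	close(fd);
	return ret;
}

int main(int argc, char **argv)
{
	if (argc != 2)
		return 2;
	return lnvm_dev_init(argv[1]) ? 1 : 0;
}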
The user may specify the media manager that they wish to instantiate on top. Default from user-space will be "gennvm". Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 50 +++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/lightnvm.h | 11 ++++++++++ 2 files changed, 61 insertions(+) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 73b8ae1dafc4..ee08fac6f327 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -1019,6 +1019,54 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg) return __nvm_configure_remove(&remove); } +static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info) +{ + info->seqnr = 1; + info->erase_cnt = 0; + info->version = 1; +} + +static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init) +{ + struct nvm_dev *dev; + struct nvm_sb_info info; + + down_write(&nvm_lock); + dev = nvm_find_nvm_dev(init->dev); + up_write(&nvm_lock); + if (!dev) { + pr_err("nvm: device not found\n"); + return -EINVAL; + } + + nvm_setup_nvm_sb_info(&info); + + strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); + info.fs_ppa.ppa = -1; + + return nvm_init_sysblock(dev, &info); +} + +static long nvm_ioctl_dev_init(struct file *file, void __user *arg) +{ + struct nvm_ioctl_dev_init init; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init))) + return -EFAULT; + + if (init.flags != 0) { + pr_err("nvm: no flags supported\n"); + return -EINVAL; + } + + init.dev[DISK_NAME_LEN - 1] = '\0'; + + return __nvm_ioctl_dev_init(&init); +} + static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -1032,6 +1080,8 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) return nvm_ioctl_dev_create(file, argp); case NVM_DEV_REMOVE: return nvm_ioctl_dev_remove(file, argp); + case NVM_DEV_INIT: + return nvm_ioctl_dev_init(file, argp); } return 0; } diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index 0171b85e7efc..56339e2e0a46 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -101,6 +101,12 @@ struct nvm_ioctl_remove { __u32 flags; }; +struct nvm_ioctl_dev_init { + char dev[DISK_NAME_LEN]; /* open-channel SSD device */ + char mmtype[NVM_MMTYPE_LEN]; /* register to media manager */ + + __u32 flags; +}; /* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */ enum { @@ -111,6 +117,9 @@ enum { /* device level cmds */ NVM_DEV_CREATE_CMD, NVM_DEV_REMOVE_CMD, + + /* Init a device to support LightNVM media managers */ + NVM_DEV_INIT_CMD, }; #define NVM_IOCTL 'L' /* 0x4c */ @@ -123,6 +132,8 @@ enum { struct nvm_ioctl_create) #define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \ struct nvm_ioctl_remove) +#define NVM_DEV_INIT _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \ + struct nvm_ioctl_dev_init) #define NVM_VERSION_MAJOR 1 #define NVM_VERSION_MINOR 0 -- cgit v1.2.3 From b769207678176d590ea61ce7a64c9100925668b7 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:38 +0100 Subject: lightnvm: use system block for mm initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use system block information to register the appropriate media manager. 
This enables the LightNVM subsystem to instantiate a media manager selected by the user, instead of relying on automatic detection by each media manager loaded in the kernel. A device must now be initialized before it can proceed to initialize its media manager. Upon initialization, the configured media manager is automatically initialized as well. Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 25 +++++++++++++++++++++++-- include/linux/lightnvm.h | 3 +++ 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index ee08fac6f327..9e5712dda062 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -106,6 +106,9 @@ struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev) lockdep_assert_held(&nvm_lock); list_for_each_entry(mt, &nvm_mgrs, list) { + if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN)) + continue; + ret = mt->register_mgr(dev); if (ret < 0) { pr_err("nvm: media mgr failed to init (%d) on dev %s\n", @@ -569,9 +572,16 @@ int nvm_register(struct request_queue *q, char *disk_name, } } + ret = nvm_get_sysblock(dev, &dev->sb); + if (!ret) + pr_err("nvm: device not initialized.\n"); + else if (ret < 0) + pr_err("nvm: err (%d) on device initialization\n", ret); + /* register device with a supported media manager */ down_write(&nvm_lock); - dev->mt = nvm_init_mgr(dev); + if (ret > 0) + dev->mt = nvm_init_mgr(dev); list_add(&dev->devices, &nvm_devices); up_write(&nvm_lock); @@ -1030,6 +1040,7 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init) { struct nvm_dev *dev; struct nvm_sb_info info; + int ret; down_write(&nvm_lock); dev = nvm_find_nvm_dev(init->dev); @@ -1044,7 +1055,17 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init) strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); info.fs_ppa.ppa = -1; - return nvm_init_sysblock(dev, &info); + ret = nvm_init_sysblock(dev, &info); + if (ret) + return ret; + + memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); + + down_write(&nvm_lock); + dev->mt = nvm_init_mgr(dev); + up_write(&nvm_lock); + + return 0; } static long nvm_ioctl_dev_init(struct file *file, void __user *arg) diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 7ad22d3f0d2e..02f36bdf216e 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -301,6 +301,9 @@ struct nvm_dev { struct nvmm_type *mt; void *mp; + /* System blocks */ + struct nvm_sb_info sb; + /* Device information */ int nr_chnls; int nr_planes; -- cgit v1.2.3 From 8b4970c41f88ad772771f87b1c82c395248a84d8 Mon Sep 17 00:00:00 2001 From: Matias Bjørling Date: Tue, 12 Jan 2016 07:49:39 +0100 Subject: lightnvm: introduce factory reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that a device can be managed using the system blocks, a method to reset the device is necessary as well. This patch introduces logic to reset the device easily to factory state and exposes it through an ioctl. The ioctl takes the following flags: NVM_FACTORY_ERASE_ONLY_USER By default all blocks, except host-reserved blocks are erased upon factory reset. Instead of this, only erase host-reserved blocks. NVM_FACTORY_RESET_HOST_BLKS Mark host-reserved blocks to be erased and set their type to free. NVM_FACTORY_RESET_GRWN_BBLKS Mark "grown bad blocks" to be erased and set their type to free. 
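Purely as an illustration of how these flags combine (not part of the patch; it assumes the NVM_DEV_FACTORY ioctl and struct nvm_ioctl_dev_factory introduced below, plus the same uapi constants as the dev-init example earlier; the helper name lnvm_factory_reset is made up):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lightnvm.h>

/* factory reset that also erases host-reserved and grown-bad blocks
 * and returns their marks to free */
int lnvm_factory_reset(const char *devname)
{
	struct nvm_ioctl_dev_factory fact;
	int fd, ret;

	memset(&fact, 0, sizeof(fact));
	strncpy(fact.dev, devname, DISK_NAME_LEN - 1);
	fact.flags = NVM_FACTORY_RESET_HOST_BLKS |
		     NVM_FACTORY_RESET_GRWN_BBLKS;

	fd = open(NVM_CTRL_FILE, O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, NVM_DEV_FACTORY, &fact);
	close(fd);
	return ret;
}

It is called the same way as the dev-init helper; leaving fact.flags at zero requests the default reset described above.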
Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- drivers/lightnvm/core.c | 34 ++++++++ drivers/lightnvm/sysblk.c | 179 ++++++++++++++++++++++++++++++++++++++++++ include/linux/lightnvm.h | 2 + include/uapi/linux/lightnvm.h | 19 +++++ 4 files changed, 234 insertions(+) (limited to 'include') diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 9e5712dda062..33224cb91c5b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -1088,6 +1088,38 @@ static long nvm_ioctl_dev_init(struct file *file, void __user *arg) return __nvm_ioctl_dev_init(&init); } +static long nvm_ioctl_dev_factory(struct file *file, void __user *arg) +{ + struct nvm_ioctl_dev_factory fact; + struct nvm_dev *dev; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory))) + return -EFAULT; + + fact.dev[DISK_NAME_LEN - 1] = '\0'; + + if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1)) + return -EINVAL; + + down_write(&nvm_lock); + dev = nvm_find_nvm_dev(fact.dev); + up_write(&nvm_lock); + if (!dev) { + pr_err("nvm: device not found\n"); + return -EINVAL; + } + + if (dev->mt) { + dev->mt->unregister_mgr(dev); + dev->mt = NULL; + } + + return nvm_dev_factory(dev, fact.flags); +} + static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -1103,6 +1135,8 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) return nvm_ioctl_dev_remove(file, argp); case NVM_DEV_INIT: return nvm_ioctl_dev_init(file, argp); + case NVM_DEV_FACTORY: + return nvm_ioctl_dev_factory(file, argp); } return 0; } diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c index b8489f425b05..321de1f154c5 100644 --- a/drivers/lightnvm/sysblk.c +++ b/drivers/lightnvm/sysblk.c @@ -560,3 +560,182 @@ err_mark: mutex_unlock(&dev->mlock); return ret; } + +struct factory_blks { + struct nvm_dev *dev; + int flags; + unsigned long *blks; +}; + +static int factory_nblks(int nblks) +{ + /* Round up to nearest BITS_PER_LONG */ + return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); +} + +static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun) +{ + int nblks = factory_nblks(dev->blks_per_lun); + + return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) / + BITS_PER_LONG; +} + +static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks, + void *private) +{ + struct factory_blks *f = private; + struct nvm_dev *dev = f->dev; + int i, lunoff; + + lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun); + + /* non-set bits correspond to the block must be erased */ + for (i = 0; i < nr_blks; i++) { + switch (blks[i]) { + case NVM_BLK_T_FREE: + if (f->flags & NVM_FACTORY_ERASE_ONLY_USER) + set_bit(i, &f->blks[lunoff]); + break; + case NVM_BLK_T_HOST: + if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS)) + set_bit(i, &f->blks[lunoff]); + break; + case NVM_BLK_T_GRWN_BAD: + if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS)) + set_bit(i, &f->blks[lunoff]); + break; + default: + set_bit(i, &f->blks[lunoff]); + break; + } + } + + return 0; +} + +static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list, + int max_ppas, struct factory_blks *f) +{ + struct ppa_addr ppa; + int ch, lun, blkid, idx, done = 0, ppa_cnt = 0; + unsigned long *offset; + + while (!done) { + done = 1; + for (ch = 0; ch < dev->nr_chnls; ch++) { + for (lun = 0; lun < dev->luns_per_chnl; lun++) { + idx = factory_blk_offset(dev, ch, lun); + offset = &f->blks[idx]; + + blkid 
= find_first_zero_bit(offset, + dev->blks_per_lun); + if (blkid >= dev->blks_per_lun) + continue; + set_bit(blkid, offset); + + ppa.ppa = 0; + ppa.g.ch = ch; + ppa.g.lun = lun; + ppa.g.blk = blkid; + pr_debug("nvm: erase ppa (%u %u %u)\n", + ppa.g.ch, + ppa.g.lun, + ppa.g.blk); + + erase_list[ppa_cnt] = ppa; + ppa_cnt++; + done = 0; + + if (ppa_cnt == max_ppas) + return ppa_cnt; + } + } + } + + return ppa_cnt; +} + +static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, + nvm_bb_update_fn *fn, void *priv) +{ + struct ppa_addr dev_ppa; + int ret; + + dev_ppa = generic_to_dev_addr(dev, ppa); + + ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv); + if (ret) + pr_err("nvm: failed bb tbl for ch%u lun%u\n", + ppa.g.ch, ppa.g.blk); + return ret; +} + +static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f) +{ + int ch, lun, ret; + struct ppa_addr ppa; + + ppa.ppa = 0; + for (ch = 0; ch < dev->nr_chnls; ch++) { + for (lun = 0; lun < dev->luns_per_chnl; lun++) { + ppa.g.ch = ch; + ppa.g.lun = lun; + + ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks, + f); + if (ret) + return ret; + } + } + + return 0; +} + +int nvm_dev_factory(struct nvm_dev *dev, int flags) +{ + struct factory_blks f; + struct ppa_addr *ppas; + int ppa_cnt, ret = -ENOMEM; + int max_ppas = dev->ops->max_phys_sect / dev->nr_planes; + struct ppa_addr sysblk_ppas[MAX_SYSBLKS]; + struct sysblk_scan s; + + f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns, + GFP_KERNEL); + if (!f.blks) + return ret; + + ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL); + if (!ppas) + goto err_blks; + + f.dev = dev; + f.flags = flags; + + /* create list of blks to be erased */ + ret = nvm_fact_select_blks(dev, &f); + if (ret) + goto err_ppas; + + /* continue to erase until list of blks until empty */ + while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0) + nvm_erase_ppa(dev, ppas, ppa_cnt); + + /* mark host reserved blocks free */ + if (flags & NVM_FACTORY_RESET_HOST_BLKS) { + nvm_setup_sysblk_scan(dev, &s, sysblk_ppas); + mutex_lock(&dev->mlock); + ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, + sysblk_get_host_blks); + if (!ret) + ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE); + mutex_unlock(&dev->mlock); + } +err_ppas: + kfree(ppas); +err_blks: + kfree(f.blks); + return ret; +} +EXPORT_SYMBOL(nvm_dev_factory); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 02f36bdf216e..fc0e7c924967 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -527,6 +527,8 @@ struct nvm_system_block { extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *); extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *); + +extern int nvm_dev_factory(struct nvm_dev *, int flags); #else /* CONFIG_NVM */ struct nvm_dev_ops; diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h index 56339e2e0a46..774a43128a7a 100644 --- a/include/uapi/linux/lightnvm.h +++ b/include/uapi/linux/lightnvm.h @@ -108,6 +108,20 @@ struct nvm_ioctl_dev_init { __u32 flags; }; +enum { + NVM_FACTORY_ERASE_ONLY_USER = 1 << 0, /* erase only blocks used as + * host blks or grown blks */ + NVM_FACTORY_RESET_HOST_BLKS = 1 << 1, /* remove host blk marks */ + NVM_FACTORY_RESET_GRWN_BBLKS = 1 << 2, /* remove grown blk marks */ + NVM_FACTORY_NR_BITS = 1 << 3, /* stops here */ +}; + +struct nvm_ioctl_dev_factory { + char dev[DISK_NAME_LEN]; + + __u32 flags; 
+}; + /* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */ enum { /* top level cmds */ @@ -120,6 +134,9 @@ enum { /* Init a device to support LightNVM media managers */ NVM_DEV_INIT_CMD, + + /* Factory reset device */ + NVM_DEV_FACTORY_CMD, }; #define NVM_IOCTL 'L' /* 0x4c */ @@ -134,6 +151,8 @@ enum { struct nvm_ioctl_remove) #define NVM_DEV_INIT _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \ struct nvm_ioctl_dev_init) +#define NVM_DEV_FACTORY _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \ + struct nvm_ioctl_dev_factory) #define NVM_VERSION_MAJOR 1 #define NVM_VERSION_MINOR 0 -- cgit v1.2.3 From a7fd9a4f3e8179bab31e4637236ebb0e0b7867c6 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Jan 2016 13:04:11 -0700 Subject: lightnvm: ensure that nvm_dev_ops can be used without CONFIG_NVM null_blk defines an empty version of this ops structure if CONFIG_NVM isn't set, but it doesn't know the type. Move those bits out of the protection of CONFIG_NVM in the main lightnvm include. Signed-off-by: Jens Axboe --- include/linux/lightnvm.h | 121 +++++++++++++++++++++++++---------------------- 1 file changed, 64 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index fc0e7c924967..d6750111e48e 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -1,6 +1,8 @@ #ifndef NVM_H #define NVM_H +#include + enum { NVM_IO_OK = 0, NVM_IO_REQUEUE = 1, @@ -11,10 +13,71 @@ enum { NVM_IOTYPE_GC = 1, }; +#define NVM_BLK_BITS (16) +#define NVM_PG_BITS (16) +#define NVM_SEC_BITS (8) +#define NVM_PL_BITS (8) +#define NVM_LUN_BITS (8) +#define NVM_CH_BITS (8) + +struct ppa_addr { + /* Generic structure for all addresses */ + union { + struct { + u64 blk : NVM_BLK_BITS; + u64 pg : NVM_PG_BITS; + u64 sec : NVM_SEC_BITS; + u64 pl : NVM_PL_BITS; + u64 lun : NVM_LUN_BITS; + u64 ch : NVM_CH_BITS; + } g; + + u64 ppa; + }; +}; + +struct nvm_rq; +struct nvm_id; +struct nvm_dev; + +typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); +typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); +typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); +typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, + nvm_l2p_update_fn *, void *); +typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, + nvm_bb_update_fn *, void *); +typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); +typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); +typedef void (nvm_destroy_dma_pool_fn)(void *); +typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, + dma_addr_t *); +typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); + +struct nvm_dev_ops { + nvm_id_fn *identity; + nvm_get_l2p_tbl_fn *get_l2p_tbl; + nvm_op_bb_tbl_fn *get_bb_tbl; + nvm_op_set_bb_fn *set_bb_tbl; + + nvm_submit_io_fn *submit_io; + nvm_erase_blk_fn *erase_block; + + nvm_create_dma_pool_fn *create_dma_pool; + nvm_destroy_dma_pool_fn *destroy_dma_pool; + nvm_dev_dma_alloc_fn *dev_dma_alloc; + nvm_dev_dma_free_fn *dev_dma_free; + + unsigned int max_phys_sect; +}; + + + #ifdef CONFIG_NVM #include -#include #include #include #include @@ -149,29 +212,6 @@ struct nvm_tgt_instance { #define NVM_VERSION_MINOR 0 #define NVM_VERSION_PATCH 0 -#define NVM_BLK_BITS (16) -#define NVM_PG_BITS (16) -#define NVM_SEC_BITS (8) -#define NVM_PL_BITS (8) -#define NVM_LUN_BITS (8) -#define 
NVM_CH_BITS (8) - -struct ppa_addr { - /* Generic structure for all addresses */ - union { - struct { - u64 blk : NVM_BLK_BITS; - u64 pg : NVM_PG_BITS; - u64 sec : NVM_SEC_BITS; - u64 pl : NVM_PL_BITS; - u64 lun : NVM_LUN_BITS; - u64 ch : NVM_CH_BITS; - } g; - - u64 ppa; - }; -}; - struct nvm_rq; typedef void (nvm_end_io_fn)(struct nvm_rq *); @@ -213,39 +253,6 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) struct nvm_block; -typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); -typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); -typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); -typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, - nvm_l2p_update_fn *, void *); -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, - nvm_bb_update_fn *, void *); -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int); -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); -typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *); -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); -typedef void (nvm_destroy_dma_pool_fn)(void *); -typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, - dma_addr_t *); -typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); - -struct nvm_dev_ops { - nvm_id_fn *identity; - nvm_get_l2p_tbl_fn *get_l2p_tbl; - nvm_op_bb_tbl_fn *get_bb_tbl; - nvm_op_set_bb_fn *set_bb_tbl; - - nvm_submit_io_fn *submit_io; - nvm_erase_blk_fn *erase_block; - - nvm_create_dma_pool_fn *create_dma_pool; - nvm_destroy_dma_pool_fn *destroy_dma_pool; - nvm_dev_dma_alloc_fn *dev_dma_alloc; - nvm_dev_dma_free_fn *dev_dma_free; - - unsigned int max_phys_sect; -}; - struct nvm_lun { int id; -- cgit v1.2.3
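As a closing illustration (not part of the series): with struct ppa_addr and struct nvm_dev_ops now defined outside CONFIG_NVM, a driver built without the subsystem can still declare a typed, empty ops table, and the generic address layout shown above packs into a single u64. The names example_stub_dev_ops and example_pack_ppa, and the field values, are made up for this sketch.

#include <linux/types.h>
#include <linux/lightnvm.h>

/* compiles whether or not CONFIG_NVM is enabled; all callbacks stay NULL */
static struct nvm_dev_ops example_stub_dev_ops;

/* pack a generic address; the union exposes it as one 64-bit value */
static u64 example_pack_ppa(void)
{
	struct ppa_addr ppa;

	ppa.ppa = 0;		/* clear every bit-field at once */
	ppa.g.ch = 1;
	ppa.g.lun = 2;
	ppa.g.pl = 0;
	ppa.g.blk = 100;
	ppa.g.pg = 3;
	ppa.g.sec = 0;

	return ppa.ppa;
}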