author     Linus Torvalds <torvalds@linux-foundation.org>   2016-03-18 17:13:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-03-18 17:13:31 -0700
commit     237045fc3c67d44088f767dca5a9fa30815eba62
tree       50773de6320e4b3358e3935c2f3c6f93d0dc93f3 /drivers/lightnvm
parent     35d88d97bee90fc09286d28209a64a991291a64a
parent     118472ab8532e55f48395ef5764b354fe48b1d73
Merge branch 'for-4.6/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"This is the block driver pull request for this merge window. It sits
on top of for-4.6/core, that was just sent out.
This contains:
- A set of fixes for lightnvm. One from Alan, fixing an overflow,
and the rest from the usual suspects, Javier and Matias.
- A set of fixes for nbd from Markus and Dan, and a fixup from Arnd
for correct usage of the signed 64-bit divider (a short sketch of the
signed vs. unsigned division helpers follows this message).
- A set of bug fixes for the Micron mtip32xx, from Asai.
- A fix for the brd discard handling from Bart.
- Update the maintainers entry for cciss, since that hardware has
transferred ownership.
- Three bug fixes for bcache from Eric Wheeler.
- Set of fixes for xen-blk{back,front} from Jan and Konrad.
- Removal of the cpqarray driver. It has been disabled in Kconfig
since 2013, and we were initially scheduled to remove it in 3.15.
- Various updates and fixes for NVMe, with the most important being:
- Removal of the per-device NVMe thread, replacing that with a
watchdog timer instead. From Christoph.
- Exposing the namespace WWID through sysfs, from Keith.
- Set of cleanups from Ming Lin.
- Logging the controller device name instead of the underlying
PCI device name, from Sagi.
- And a bunch of fixes and optimizations from the usual suspects
in this area"
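The nbd item above refers to the kernel's 64-bit division helpers from <linux/math64.h>, where div_u64() takes an unsigned dividend and div_s64() a signed one. The sketch below uses userspace stand-ins (not the real kernel implementations, and not the actual nbd code) to show why the signed variant matters once the dividend can go negative:

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified userspace mimics of the kernel helpers declared in
 * <linux/math64.h>: div_u64() divides an unsigned 64-bit value by a
 * 32-bit divisor, div_s64() is the signed equivalent.  For
 * illustration only.
 */
static uint64_t div_u64_mimic(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

static int64_t div_s64_mimic(int64_t dividend, int32_t divisor)
{
        return dividend / divisor;
}

int main(void)
{
        /* A hypothetical signed quantity, e.g. a timeout that went negative. */
        int64_t delta_ns = -1500000000LL;

        /* Routing a negative value through the unsigned helper reinterprets
         * the sign bit and yields a huge bogus quotient. */
        printf("unsigned helper: %llu\n",
               (unsigned long long)div_u64_mimic((uint64_t)delta_ns, 1000000000u));
        printf("signed helper:   %lld\n",
               (long long)div_s64_mimic(delta_ns, 1000000000));
        return 0;
}

Running it prints an enormous quotient for the unsigned path and -1 for the signed one, which is the kind of mistake the "nbd: use correct div_s64 helper" commit title points at.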
* 'for-4.6/drivers' of git://git.kernel.dk/linux-block: (49 commits)
NVMe: Expose ns wwid through single sysfs entry
drivers:block: cpqarray clean up
brd: Fix discard request processing
cpqarray: remove it from the kernel
cciss: update MAINTAINERS
NVMe: Remove unused sq_head read in completion path
bcache: fix cache_set_flush() NULL pointer dereference on OOM
bcache: cleaned up error handling around register_cache()
bcache: fix race of writeback thread starting before complete initialization
NVMe: Create discard zero quirk white list
nbd: use correct div_s64 helper
mtip32xx: remove unneeded variable in mtip_cmd_timeout()
lightnvm: generalize rrpc ppa calculations
lightnvm: remove struct nvm_dev->total_blocks
lightnvm: rename ->nr_pages to ->nr_sects
lightnvm: update closed list outside of intr context
xen/blback: Fit the important information of the thread in 17 characters
lightnvm: fold get bb tbl when using dual/quad plane mode
lightnvm: fix up nonsensical configure overrun checking
xen-blkback: advertise indirect segment support earlier
...
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r--   drivers/lightnvm/core.c   | 19
-rw-r--r--   drivers/lightnvm/gennvm.c |  7
-rw-r--r--   drivers/lightnvm/rrpc.c   | 98
-rw-r--r--   drivers/lightnvm/rrpc.h   | 15
4 files changed, 76 insertions, 63 deletions
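The first core.c hunk in the patch below collapses the old total_blocks/total_pages bookkeeping into a single dev->total_secs, and the gennvm.c and rrpc.c hunks switch their L2P bounds checks to that field. The following standalone sketch walks the same arithmetic; only the relationships between the fields are taken from the patch, the geometry numbers are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Invented example geometry, not values from any real device. */
        uint32_t luns_per_chnl = 4, nr_chnls = 16;
        uint32_t blks_per_lun = 1024, sec_per_blk = 4096;

        /* Same relationships as in nvm_core_init() in the patch below. */
        uint32_t nr_luns = luns_per_chnl * nr_chnls;
        uint64_t sec_per_lun = (uint64_t)sec_per_blk * blks_per_lun;
        uint64_t total_secs = (uint64_t)nr_luns * sec_per_lun;

        printf("nr_luns=%u sec_per_lun=%llu total_secs=%llu\n",
               nr_luns, (unsigned long long)sec_per_lun,
               (unsigned long long)total_secs);
        return 0;
}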
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 9f6acd5d1d2e..0d1fb6b40c46 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -250,7 +250,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
                 return 0;
         }

-        plane_cnt = (1 << dev->plane_mode);
+        plane_cnt = dev->plane_mode;
         rqd->nr_pages = plane_cnt * nr_ppas;

         if (dev->ops->max_phys_sect < rqd->nr_pages)
@@ -463,11 +463,7 @@ static int nvm_core_init(struct nvm_dev *dev)
         dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
         dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

-        dev->total_blocks = dev->nr_planes *
-                                dev->blks_per_lun *
-                                dev->luns_per_chnl *
-                                dev->nr_chnls;
-        dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+        dev->total_secs = dev->nr_luns * dev->sec_per_lun;

         INIT_LIST_HEAD(&dev->online_targets);
         mutex_init(&dev->mlock);
@@ -872,20 +868,19 @@ static int nvm_configure_by_str_event(const char *val,
 static int nvm_configure_get(char *buf, const struct kernel_param *kp)
 {
-        int sz = 0;
-        char *buf_start = buf;
+        int sz;
         struct nvm_dev *dev;

-        buf += sprintf(buf, "available devices:\n");
+        sz = sprintf(buf, "available devices:\n");
         down_write(&nvm_lock);
         list_for_each_entry(dev, &nvm_devices, devices) {
-                if (sz > 4095 - DISK_NAME_LEN)
+                if (sz > 4095 - DISK_NAME_LEN - 2)
                         break;

-                buf += sprintf(buf, " %32s\n", dev->name);
+                sz += sprintf(buf + sz, " %32s\n", dev->name);
         }
         up_write(&nvm_lock);

-        return buf - buf_start - 1;
+        return sz;
 }

 static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b16148..d65ec36a2231 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -100,14 +100,13 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 {
         struct nvm_dev *dev = private;
         struct gen_nvm *gn = dev->mp;
-        sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
         u64 elba = slba + nlb;
         struct gen_lun *lun;
         struct nvm_block *blk;
         u64 i;
         int lun_id;

-        if (unlikely(elba > dev->total_pages)) {
+        if (unlikely(elba > dev->total_secs)) {
                 pr_err("gennvm: L2P data from device is out of bounds!\n");
                 return -EINVAL;
         }
@@ -115,7 +114,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
         for (i = 0; i < nlb; i++) {
                 u64 pba = le64_to_cpu(entries[i]);

-                if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+                if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                         pr_err("gennvm: L2P data entry is out of bounds!\n");
                         return -EINVAL;
                 }
@@ -197,7 +196,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
         }

         if (dev->ops->get_l2p_tbl) {
-                ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
+                ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
                                         gennvm_block_map, dev);
                 if (ret) {
                         pr_err("gennvm: could not read L2P table.\n");
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 307db1ea22de..82343783aa47 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -38,7 +38,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)

         spin_lock(&rblk->lock);

-        div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+        div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
         WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
         rblk->nr_invalid_pages++;

@@ -113,14 +113,24 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)

 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-        return (rblk->next_page == rrpc->dev->pgs_per_blk);
+        return (rblk->next_page == rrpc->dev->sec_per_blk);
 }

+/* Calculate relative addr for the given block, considering instantiated LUNs */
+static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+        struct nvm_block *blk = rblk->parent;
+        int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+
+        return lun_blk * rrpc->dev->sec_per_blk;
+}
+
+/* Calculate global addr for the given block */
 static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
         struct nvm_block *blk = rblk->parent;

-        return blk->id * rrpc->dev->pgs_per_blk;
+        return blk->id * rrpc->dev->sec_per_blk;
 }

 static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
@@ -136,7 +146,7 @@ static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
         l.g.sec = secs;

         sector_div(ppa, dev->sec_per_pg);
-        div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+        div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
         l.g.pg = pgs;

         sector_div(ppa, dev->pgs_per_blk);
@@ -191,12 +201,12 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
                 return NULL;
         }

-        rblk = &rlun->blocks[blk->id];
+        rblk = rrpc_get_rblk(rlun, blk->id);
         list_add_tail(&rblk->list, &rlun->open_list);
         spin_unlock(&lun->lock);

         blk->priv = rblk;
-        bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+        bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
         rblk->next_page = 0;
         rblk->nr_invalid_pages = 0;
         atomic_set(&rblk->data_cmnt_size, 0);
@@ -286,11 +296,11 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
         struct bio *bio;
         struct page *page;
         int slot;
-        int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+        int nr_sec_per_blk = rrpc->dev->sec_per_blk;
         u64 phys_addr;
         DECLARE_COMPLETION_ONSTACK(wait);

-        if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+        if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
                 return 0;

         bio = bio_alloc(GFP_NOIO, 1);
@@ -306,10 +316,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
         }

         while ((slot = find_first_zero_bit(rblk->invalid_pages,
-                                            nr_pgs_per_blk)) < nr_pgs_per_blk) {
+                                            nr_sec_per_blk)) < nr_sec_per_blk) {

                 /* Lock laddr */
-                phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+                phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

try:
                 spin_lock(&rrpc->rev_lock);
@@ -381,7 +391,7 @@ finished:
         mempool_free(page, rrpc->page_pool);
         bio_put(bio);

-        if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+        if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
                 pr_err("nvm: failed to garbage collect block\n");
                 return -EIO;
         }
@@ -499,12 +509,21 @@ static void rrpc_gc_queue(struct work_struct *work)
         struct rrpc *rrpc = gcb->rrpc;
         struct rrpc_block *rblk = gcb->rblk;
         struct nvm_lun *lun = rblk->parent->lun;
+        struct nvm_block *blk = rblk->parent;
         struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];

         spin_lock(&rlun->lock);
         list_add_tail(&rblk->prio, &rlun->prio_list);
         spin_unlock(&rlun->lock);

+        spin_lock(&lun->lock);
+        lun->nr_open_blocks--;
+        lun->nr_closed_blocks++;
+        blk->state &= ~NVM_BLK_ST_OPEN;
+        blk->state |= NVM_BLK_ST_CLOSED;
+        list_move_tail(&rblk->list, &rlun->closed_list);
+        spin_unlock(&lun->lock);
+
         mempool_free(gcb, rrpc->gcb_pool);
         pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
                                                 rblk->parent->id);
@@ -545,7 +564,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
         struct rrpc_addr *gp;
         struct rrpc_rev_addr *rev;

-        BUG_ON(laddr >= rrpc->nr_pages);
+        BUG_ON(laddr >= rrpc->nr_sects);

         gp = &rrpc->trans_map[laddr];
         spin_lock(&rrpc->rev_lock);
@@ -668,20 +687,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
                 lun = rblk->parent->lun;

                 cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-                if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
-                        struct nvm_block *blk = rblk->parent;
-                        struct rrpc_lun *rlun = rblk->rlun;
-
-                        spin_lock(&lun->lock);
-                        lun->nr_open_blocks--;
-                        lun->nr_closed_blocks++;
-                        blk->state &= ~NVM_BLK_ST_OPEN;
-                        blk->state |= NVM_BLK_ST_CLOSED;
-                        list_move_tail(&rblk->list, &rlun->closed_list);
-                        spin_unlock(&lun->lock);
-
+                if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
                         rrpc_run_gc(rrpc, rblk);
-                }
         }
 }

@@ -726,7 +733,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,

         for (i = 0; i < npages; i++) {
                 /* We assume that mapping occurs at 4KB granularity */
-                BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+                BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
                 gp = &rrpc->trans_map[laddr + i];

                 if (gp->rblk) {
@@ -757,7 +764,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
         if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
                 return NVM_IO_REQUEUE;

-        BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+        BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
         gp = &rrpc->trans_map[laddr];

         if (gp->rblk) {
@@ -1007,21 +1014,21 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
         struct nvm_dev *dev = rrpc->dev;
         struct rrpc_addr *addr = rrpc->trans_map + slba;
         struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
-        sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
         u64 elba = slba + nlb;
         u64 i;

-        if (unlikely(elba > dev->total_pages)) {
+        if (unlikely(elba > dev->total_secs)) {
                 pr_err("nvm: L2P data from device is out of bounds!\n");
                 return -EINVAL;
         }

         for (i = 0; i < nlb; i++) {
                 u64 pba = le64_to_cpu(entries[i]);
+                unsigned int mod;
                 /* LNVM treats address-spaces as silos, LBA and PBA are
                  * equally large and zero-indexed.
                  */
-                if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+                if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
                         pr_err("nvm: L2P data entry is out of bounds!\n");
                         return -EINVAL;
                 }
@@ -1033,8 +1040,10 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
                 if (!pba)
                         continue;

+                div_u64_rem(pba, rrpc->nr_sects, &mod);
+
                 addr[i].addr = pba;
-                raddr[pba].addr = slba + i;
+                raddr[mod].addr = slba + i;
         }

         return 0;
@@ -1046,16 +1055,16 @@ static int rrpc_map_init(struct rrpc *rrpc)
         sector_t i;
         int ret;

-        rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+        rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
         if (!rrpc->trans_map)
                 return -ENOMEM;

         rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
-                                                        * rrpc->nr_pages);
+                                                        * rrpc->nr_sects);
         if (!rrpc->rev_trans_map)
                 return -ENOMEM;

-        for (i = 0; i < rrpc->nr_pages; i++) {
+        for (i = 0; i < rrpc->nr_sects; i++) {
                 struct rrpc_addr *p = &rrpc->trans_map[i];
                 struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

@@ -1067,8 +1076,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
                 return 0;

         /* Bring up the mapping table from device */
-        ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
-                                        rrpc_l2p_update, rrpc);
+        ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update,
+                                                                        rrpc);
         if (ret) {
                 pr_err("nvm: rrpc: could not read L2P table.\n");
                 return -EINVAL;
@@ -1141,7 +1150,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
         struct rrpc_lun *rlun;
         int i, j;

-        if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+        if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
                 pr_err("rrpc: number of pages per block too high.");
                 return -EINVAL;
         }
@@ -1168,7 +1177,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
                 spin_lock_init(&rlun->lock);

                 rrpc->total_blocks += dev->blks_per_lun;
-                rrpc->nr_pages += dev->sec_per_lun;
+                rrpc->nr_sects += dev->sec_per_lun;

                 rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
                                                 rrpc->dev->blks_per_lun);
@@ -1221,9 +1230,9 @@ static sector_t rrpc_capacity(void *private)

         /* cur, gc, and two emergency blocks for each lun */
         reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
-        provisioned = rrpc->nr_pages - reserved;
+        provisioned = rrpc->nr_sects - reserved;

-        if (reserved > rrpc->nr_pages) {
+        if (reserved > rrpc->nr_sects) {
                 pr_err("rrpc: not enough space available to expose storage.\n");
                 return 0;
         }
@@ -1242,10 +1251,11 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
         struct nvm_dev *dev = rrpc->dev;
         int offset;
         struct rrpc_addr *laddr;
-        u64 paddr, pladdr;
+        u64 bpaddr, paddr, pladdr;

-        for (offset = 0; offset < dev->pgs_per_blk; offset++) {
-                paddr = block_to_addr(rrpc, rblk) + offset;
+        bpaddr = block_to_rel_addr(rrpc, rblk);
+        for (offset = 0; offset < dev->sec_per_blk; offset++) {
+                paddr = bpaddr + offset;
                 pladdr = rrpc->rev_trans_map[paddr].addr;

                 if (pladdr == ADDR_EMPTY)
@@ -1386,7 +1396,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
         blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

         pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
-                        rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+                        rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

         mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index f7b37336353f..855f4a5ca7dd 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -104,7 +104,7 @@ struct rrpc {
         struct rrpc_lun *luns;

         /* calculated values */
-        unsigned long long nr_pages;
+        unsigned long long nr_sects;
         unsigned long total_blocks;

         /* Write strategy variables. Move these into each for structure for each
@@ -156,6 +156,15 @@ struct rrpc_rev_addr {
         u64 addr;
 };

+static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
+                                                                int blk_id)
+{
+        struct rrpc *rrpc = rlun->rrpc;
+        int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+
+        return &rlun->blocks[lun_blk];
+}
+
 static inline sector_t rrpc_get_laddr(struct bio *bio)
 {
         return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
 }
@@ -206,7 +215,7 @@ static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
                                  unsigned pages,
                                  struct rrpc_inflight_rq *r)
 {
-        BUG_ON((laddr + pages) > rrpc->nr_pages);
+        BUG_ON((laddr + pages) > rrpc->nr_sects);

         return __rrpc_lock_laddr(rrpc, laddr, pages, r);
 }
@@ -243,7 +252,7 @@ static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
         struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
         uint8_t pages = rqd->nr_pages;

-        BUG_ON((r->l_start + pages) > rrpc->nr_pages);
+        BUG_ON((r->l_start + pages) > rrpc->nr_sects);

         rrpc_unlock_laddr(rrpc, r);
 }
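The rrpc portion of the patch above adds rrpc_get_rblk() and block_to_rel_addr() so that block lookups and reverse-map entries are indexed relative to the LUNs the target actually instantiates, rather than by global block id. The standalone sketch below replays that index math with invented example geometry (the constants are made up; only the modulo structure comes from the patch):

#include <stdint.h>
#include <stdio.h>

/* Example geometry, invented for illustration only. */
#define BLKS_PER_LUN  1024u
#define SEC_PER_BLK   4096u
#define NR_LUNS          8u   /* LUNs instantiated by this rrpc target */

/* Per-LUN slot of a globally numbered block, as in rrpc_get_rblk(). */
static unsigned int lun_block_index(unsigned int blk_id)
{
        return blk_id % BLKS_PER_LUN;
}

/*
 * Sector address of a block relative to the target's own address space,
 * mirroring block_to_rel_addr(): the modulo keeps the result inside the
 * sectors owned by the instantiated LUNs.
 */
static uint64_t block_to_rel_addr(unsigned int blk_id)
{
        unsigned int lun_blk = blk_id % (BLKS_PER_LUN * NR_LUNS);

        return (uint64_t)lun_blk * SEC_PER_BLK;
}

int main(void)
{
        unsigned int blk_id = 5000;   /* a block in the fifth instantiated LUN */

        printf("blk %u -> per-LUN slot %u, relative sector %llu\n",
               blk_id, lun_block_index(blk_id),
               (unsigned long long)block_to_rel_addr(blk_id));
        return 0;
}

With these numbers, block 5000 lands in the fifth instantiated LUN: its per-LUN slot is 5000 % 1024 = 904, while its relative sector address is (5000 % 8192) * 4096.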