From 949928c1c731417cc0f070912c63878b62b544f4 Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Thu, 17 Dec 2015 17:08:15 -0700
Subject: NVMe: Fix possible queue use after freed

This notifies blk-mq when the tag set contains a different number of
queues prior to freeing unused ones that the request queue points to.

Signed-off-by: Keith Busch
Reviewed-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/pci.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef8322d32a..08791338ce75 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1381,7 +1381,7 @@ static int nvme_kthread(void *data)
 
 static int nvme_create_io_queues(struct nvme_dev *dev)
 {
-	unsigned i;
+	unsigned i, max;
 	int ret = 0;
 
 	for (i = dev->queue_count; i <= dev->max_qid; i++) {
@@ -1391,7 +1391,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 		}
 	}
 
-	for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
+	max = min(dev->max_qid, dev->queue_count - 1);
+	for (i = dev->online_queues; i <= max; i++) {
 		ret = nvme_create_queue(dev->queues[i], i);
 		if (ret) {
 			nvme_free_queues(dev, i);
@@ -1548,9 +1549,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 		adminq->cq_vector = -1;
 		goto free_queues;
 	}
-
-	/* Free previously allocated queues that are no longer usable */
-	nvme_free_queues(dev, nr_io_queues + 1);
 	return nvme_create_io_queues(dev);
 
  free_queues:
@@ -1684,7 +1682,13 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		if (blk_mq_alloc_tag_set(&dev->tagset))
 			return 0;
 		dev->ctrl.tagset = &dev->tagset;
+	} else {
+		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+
+		/* Free previously allocated queues that are no longer usable */
+		nvme_free_queues(dev, dev->online_queues);
 	}
+
 	queue_work(nvme_workq, &dev->scan_work);
 	return 0;
 }
--
cgit v1.2.3

From f4f0f63e6f01055dfbdb7bc5e83935e1bdfa1980 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 9 Feb 2016 12:44:03 -0700
Subject: nvme: fix drvdata setup for the nvme device

Pass the right private data to device_create_with_groups from the
beginning, and remove the superfluous call to dev_set_drvdata.

Signed-off-by: Christoph Hellwig
Reviewed-by: Jon Derrick
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001af559..c326931d9b4d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1383,14 +1383,13 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
 	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
 				MKDEV(nvme_char_major, ctrl->instance),
-				dev, nvme_dev_attr_groups,
+				ctrl, nvme_dev_attr_groups,
 				"nvme%d", ctrl->instance);
 	if (IS_ERR(ctrl->device)) {
 		ret = PTR_ERR(ctrl->device);
 		goto out_release_instance;
 	}
 	get_device(ctrl->device);
-	dev_set_drvdata(ctrl->device, ctrl);
 
 	spin_lock(&dev_list_lock);
 	list_add_tail(&ctrl->node, &nvme_ctrl_list);
--
cgit v1.2.3

From 1b3c47c182aac70c4487105d2e22a17f0193525f Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 10 Feb 2016 08:51:15 -0700
Subject: nvme: Log the ctrl device name instead of the underlying pci device name

Having the ctrl name "nvmeX" seems much more friendly than the
underlying device name.
Also, with other nvme transports such as the soon to come nvme-loop we
don't have an underlying device, so it doesn't make sense to make up
one.

In order to help match an instance name to a pci function, we add an
info print in nvme_probe.

Signed-off-by: Sagi Grimberg
Acked-by: Keith Busch

Manually fixed up the hunk in nvme_cancel_queue_ios().

Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 24 ++++++++++++------------
 drivers/nvme/host/pci.c  | 37 +++++++++++++++++++++----------------
 2 files changed, 33 insertions(+), 28 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c326931d9b4d..a7c29f24976c 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -557,8 +557,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 	unsigned short bs;
 
 	if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
-		dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
-				__func__, ns->ctrl->instance, ns->ns_id);
+		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+				__func__);
 		return -ENODEV;
 	}
 	if (id->ncap == 0) {
@@ -568,7 +568,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 
 	if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
 		if (nvme_nvm_register(ns->queue, disk->disk_name)) {
-			dev_warn(ns->ctrl->dev,
+			dev_warn(disk_to_dev(ns->disk),
 				"%s: LightNVM init failure\n", __func__);
 			kfree(id);
 			return -ENODEV;
@@ -741,7 +741,7 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
 		if (fatal_signal_pending(current))
 			return -EINTR;
 		if (time_after(jiffies, timeout)) {
-			dev_err(ctrl->dev,
+			dev_err(ctrl->device,
 				"Device not ready; aborting %s\n", enabled ?
 						"initialisation" : "reset");
 			return -ENODEV;
@@ -781,7 +781,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
 	int ret;
 
 	if (page_shift < dev_page_min) {
-		dev_err(ctrl->dev,
+		dev_err(ctrl->device,
 			"Minimum device page size %u too large for host (%u)\n",
 			1 << dev_page_min, 1 << page_shift);
 		return -ENODEV;
@@ -822,7 +822,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 		if (fatal_signal_pending(current))
 			return -EINTR;
 		if (time_after(jiffies, timeout)) {
-			dev_err(ctrl->dev,
+			dev_err(ctrl->device,
 				"Device shutdown incomplete; abort shutdown\n");
 			return -ENODEV;
 		}
@@ -844,13 +844,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
 	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
 	if (ret) {
-		dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
 		return ret;
 	}
 
 	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
 	if (ret) {
-		dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
 		return ret;
 	}
 	page_shift = NVME_CAP_MPSMIN(cap) + 12;
@@ -860,7 +860,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
 	ret = nvme_identify_ctrl(ctrl, &id);
 	if (ret) {
-		dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
 		return -EIO;
 	}
 
@@ -937,13 +937,13 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
 	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
 	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
-		dev_warn(ctrl->dev,
+		dev_warn(ctrl->device,
 			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
 		ret = -EINVAL;
 		goto out_unlock;
 	}
 
-	dev_warn(ctrl->dev,
+	dev_warn(ctrl->device,
 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
 	kref_get(&ns->kref);
 	mutex_unlock(&ctrl->namespaces_mutex);
 
@@ -969,7 +969,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 	case NVME_IOCTL_IO_CMD:
 		return nvme_dev_user_cmd(ctrl, argp);
 	case NVME_IOCTL_RESET:
-		dev_warn(ctrl->dev, "resetting controller\n");
+		dev_warn(ctrl->device, "resetting controller\n");
 		return ctrl->ops->reset_ctrl(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
 		return nvme_reset_subsystem(ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 08791338ce75..f2f55b504cf2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -299,10 +299,10 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
 
 	switch (result & 0xff07) {
 	case NVME_AER_NOTICE_NS_CHANGED:
-		dev_info(dev->dev, "rescanning\n");
+		dev_info(dev->ctrl.device, "rescanning\n");
 		queue_work(nvme_workq, &dev->scan_work);
 	default:
-		dev_warn(dev->dev, "async event result %08x\n", result);
+		dev_warn(dev->ctrl.device, "async event result %08x\n", result);
 	}
 }
 
@@ -708,7 +708,7 @@ static void nvme_complete_rq(struct request *req)
 	}
 
 	if (unlikely(iod->aborted)) {
-		dev_warn(dev->dev,
+		dev_warn(dev->ctrl.device,
 			"completing aborted command with status: %04x\n",
 			req->errors);
 	}
@@ -740,7 +740,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 			*tag = -1;
 
 		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
-			dev_warn(nvmeq->q_dmadev,
+			dev_warn(nvmeq->dev->ctrl.device,
 				"invalid id %d completed on queue %d\n",
 				cqe.command_id, le16_to_cpu(cqe.sq_id));
 			continue;
@@ -908,7 +908,8 @@ static void abort_endio(struct request *req, int error)
 	u32 result = (u32)(uintptr_t)req->special;
 	u16 status = req->errors;
 
-	dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+	dev_warn(nvmeq->dev->ctrl.device,
+		"Abort status:%x result:%x", status, result);
 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 
 	blk_mq_free_request(req);
@@ -929,7 +930,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	 * shutdown, so we return BLK_EH_HANDLED.
 	 */
 	if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
-		dev_warn(dev->dev,
+		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, disable controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
@@ -943,7 +944,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	 * returned to the driver, or if this is the admin queue.
 	 */
 	if (!nvmeq->qid || iod->aborted) {
-		dev_warn(dev->dev,
+		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, reset controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
@@ -969,8 +970,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	cmd.abort.cid = req->tag;
 	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
 
-	dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
-				 req->tag, nvmeq->qid);
+	dev_warn(nvmeq->dev->ctrl.device,
+		"I/O %d QID %d timeout, aborting\n",
+		 req->tag, nvmeq->qid);
 
 	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
 			BLK_MQ_REQ_NOWAIT);
@@ -999,7 +1001,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
 	if (!blk_mq_request_started(req))
 		return;
 
-	dev_warn(nvmeq->q_dmadev,
+	dev_warn(nvmeq->dev->ctrl.device,
 		 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
 
 	status = NVME_SC_ABORT_REQ;
@@ -1355,7 +1357,7 @@ static int nvme_kthread(void *data)
 			if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
 							csts & NVME_CSTS_CFS) {
 				if (queue_work(nvme_workq, &dev->reset_work)) {
-					dev_warn(dev->dev,
+					dev_warn(dev->ctrl.device,
 						"Failed status: %x, reset controller\n",
 						readl(dev->bar + NVME_REG_CSTS));
 				}
@@ -1483,7 +1485,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 * access to the admin queue, as that might be only way to fix them up.
 	 */
 	if (result > 0) {
-		dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+		dev_err(dev->ctrl.device,
+			"Could not set queue count (%d)\n", result);
 		nr_io_queues = 0;
 		result = 0;
 	}
@@ -1947,7 +1950,7 @@ static void nvme_reset_work(struct work_struct *work)
 	 * any working I/O queue.
 	 */
 	if (dev->online_queues < 2) {
-		dev_warn(dev->dev, "IO queues not created\n");
+		dev_warn(dev->ctrl.device, "IO queues not created\n");
 		nvme_remove_namespaces(&dev->ctrl);
 	} else {
 		nvme_start_queues(&dev->ctrl);
@@ -1984,7 +1987,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
 
 static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
 {
-	dev_warn(dev->dev, "Removing after probe failure\n");
+	dev_warn(dev->ctrl.device, "Removing after probe failure\n");
 	kref_get(&dev->ctrl.kref);
 	if (!schedule_work(&dev->remove_work))
 		nvme_put_ctrl(&dev->ctrl);
@@ -2081,6 +2084,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release_pools;
 
+	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
 	queue_work(nvme_workq, &dev->reset_work);
 	return 0;
 
@@ -2164,7 +2169,7 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
 	 * shutdown the controller to quiesce. The controller will be restarted
 	 * after the slot reset through driver's slot_reset callback.
 	 */
-	dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+	dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
 	switch (state) {
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
@@ -2181,7 +2186,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-	dev_info(&pdev->dev, "restart after slot reset\n");
+	dev_info(dev->ctrl.device, "restart after slot reset\n");
 	pci_restore_state(pdev);
 	queue_work(nvme_workq, &dev->reset_work);
 	return PCI_ERS_RESULT_RECOVERED;
--
cgit v1.2.3

From e439bb12e75c2807029853493fa787c6d70c763a Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 10 Feb 2016 10:03:29 -0800
Subject: nvme/host: reference the fabric module for each bdev open callout

We don't want to be able to unload the fabric driver when we have open
references to our namespaces.  Thus, for each nvme_open we take a
reference on the fabric driver and put it in nvme_release.  This
behavior is consistent with the scsi model.

This resolves the panic when unloading a fabric module with mpath
holders.

Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig
Reviewed-by: Ian Bakshan
Reviewed-by: Johannes Thumshirn
Signed-off-by: Ming Lin
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 19 ++++++++++++++++---
 drivers/nvme/host/nvme.h |  1 +
 drivers/nvme/host/pci.c  |  1 +
 3 files changed, 18 insertions(+), 3 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a7c29f24976c..c9cd07f02cc2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -71,11 +71,21 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
 
 	spin_lock(&dev_list_lock);
 	ns = disk->private_data;
-	if (ns && !kref_get_unless_zero(&ns->kref))
-		ns = NULL;
+	if (ns) {
+		if (!kref_get_unless_zero(&ns->kref))
+			goto fail;
+		if (!try_module_get(ns->ctrl->ops->module))
+			goto fail_put_ns;
+	}
 	spin_unlock(&dev_list_lock);
 
 	return ns;
+
+fail_put_ns:
+	kref_put(&ns->kref, nvme_free_ns);
+fail:
+	spin_unlock(&dev_list_lock);
+	return NULL;
 }
 
 void nvme_requeue_req(struct request *req)
@@ -499,7 +509,10 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
 
 static void nvme_release(struct gendisk *disk, fmode_t mode)
 {
-	nvme_put_ns(disk->private_data);
+	struct nvme_ns *ns = disk->private_data;
+
+	module_put(ns->ctrl->ops->module);
+	nvme_put_ns(ns);
 }
 
 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb737868..9f77386f7d1e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -117,6 +117,7 @@ struct nvme_ns {
 };
 
 struct nvme_ctrl_ops {
+	struct module *module;
 	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
 	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f2f55b504cf2..cb303ac91b9d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2036,6 +2036,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+	.module			= THIS_MODULE,
 	.reg_read32		= nvme_pci_reg_read32,
 	.reg_write32		= nvme_pci_reg_write32,
 	.reg_read64		= nvme_pci_reg_read64,
--
cgit v1.2.3

From ba0ba7d3e5266111ec865b0bf1ad48dd0e2a2314 Mon Sep 17 00:00:00 2001
From: Ming Lin
Date: Wed, 10 Feb 2016 10:03:30 -0800
Subject: nvme: move timeout variables to core.c

These variables are used by the PCI driver and will also be used in the
forthcoming NVMe over Fabrics drivers.

Reviewed-by: Christoph Hellwig
Reviewed-by: Johannes Thumshirn
Reviewed-by: Sagi Grimberg
Signed-off-by: Ming Lin
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 12 ++++++++++++
 drivers/nvme/host/pci.c  | 12 ------------
 2 files changed, 12 insertions(+), 12 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c9cd07f02cc2..0c0011b5e1b9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -33,6 +33,18 @@
 
 #define NVME_MINORS		(1U << MINORBITS)
 
+unsigned char admin_timeout = 60;
+module_param(admin_timeout, byte, 0644);
+MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
+
+unsigned char nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+
+unsigned char shutdown_timeout = 5;
+module_param(shutdown_timeout, byte, 0644);
+MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
+
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cb303ac91b9d..53a99422d44d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -57,18 +57,6 @@
 #define NVME_NR_AEN_COMMANDS	1
 #define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
 
-unsigned char admin_timeout = 60;
-module_param(admin_timeout, byte, 0644);
-MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
-
-unsigned char nvme_io_timeout = 30;
-module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
-MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
-
-unsigned char shutdown_timeout = 5;
-module_param(shutdown_timeout, byte, 0644);
-MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
--
cgit v1.2.3

From 9f2482b91bcd02ac2999cf04b3fb1b89e1c4d559 Mon Sep 17 00:00:00 2001
From: Ming Lin
Date: Wed, 10 Feb 2016 10:03:31 -0800
Subject: nvme: split dev_list_lock

Split dev_list_lock into one in the core and one in the PCI driver.

Reviewed-by: Christoph Hellwig
Reviewed-by: Johannes Thumshirn
Reviewed-by: Sagi Grimberg
Signed-off-by: Ming Lin
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 2 +-
 drivers/nvme/host/nvme.h | 2 --
 drivers/nvme/host/pci.c  | 1 +
 3 files changed, 2 insertions(+), 3 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0c0011b5e1b9..6eb42d24a5e9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -52,7 +52,7 @@ static int nvme_char_major;
 module_param(nvme_char_major, int, 0);
 
 static LIST_HEAD(nvme_ctrl_list);
-DEFINE_SPINLOCK(dev_list_lock);
+static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9f77386f7d1e..63ba8a500ee1 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -266,8 +266,6 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 			dma_addr_t dma_addr, u32 *result);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 
-extern spinlock_t dev_list_lock;
-
 struct sg_io_hdr;
 
 int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 53a99422d44d..54e79c035913 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -65,6 +65,7 @@ module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
 static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(dev_list_lock);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
--
cgit v1.2.3

From 576d55d625664a20ee4bae6500952febfb2d7b10 Mon Sep 17 00:00:00 2001
From: Ming Lin
Date: Wed, 10 Feb 2016 10:03:32 -0800
Subject: nvme: split pci module out of core module

NVMe over Fabrics drivers are going to reuse the core, so split
nvme.ko into 2 modules:

nvme-core.ko: the core part
nvme.ko: the PCI driver

Export symbols from nvme-core.ko.

Reviewed-by: Christoph Hellwig
Signed-off-by: Ming Lin
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/Kconfig  |  6 +++++-
 drivers/nvme/host/Makefile | 10 ++++++----
 drivers/nvme/host/core.c   | 24 +++++++++++++++++++++++-
 drivers/nvme/host/pci.c    | 13 +------------
 4 files changed, 35 insertions(+), 18 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d6237391dcd..2ed30f063a13 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,6 +1,10 @@
+config NVME_CORE
+	tristate
+
 config BLK_DEV_NVME
 	tristate "NVM Express block device"
 	depends on PCI && BLOCK
+	select NVME_CORE
 	---help---
 	  The NVM Express driver is for solid state drives directly
 	  connected to the PCI or PCI Express bus.  If you know you
@@ -11,7 +15,7 @@ config BLK_DEV_NVME
 
 config BLK_DEV_NVME_SCSI
 	bool "SCSI emulation for NVMe device nodes"
-	depends on BLK_DEV_NVME
+	depends on NVME_CORE
 	---help---
 	  This adds support for the SG_IO ioctl on the NVMe character
 	  and block devices nodes, as well a a translation for a small
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 51bf90871549..9a3ca892b4a7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,6 +1,8 @@
+obj-$(CONFIG_NVME_CORE)			+= nvme-core.o
+obj-$(CONFIG_BLK_DEV_NVME)		+= nvme.o
 
-obj-$(CONFIG_BLK_DEV_NVME)	+= nvme.o
+nvme-core-y				:= core.o
+nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI)	+= scsi.o
+nvme-core-$(CONFIG_NVM)			+= lightnvm.o
 
-lightnvm-$(CONFIG_NVM)	:= lightnvm.o
-nvme-y		+= core.o pci.o $(lightnvm-y)
-nvme-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
+nvme-y					+= pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6eb42d24a5e9..07b7ec699e92 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -36,10 +36,12 @@
 unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
 MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
+EXPORT_SYMBOL_GPL(admin_timeout);
 
 unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+EXPORT_SYMBOL_GPL(nvme_io_timeout);
 
 unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
@@ -110,6 +112,7 @@ void nvme_requeue_req(struct request *req)
 		blk_mq_kick_requeue_list(req->q);
 	spin_unlock_irqrestore(req->q->queue_lock, flags);
 }
+EXPORT_SYMBOL_GPL(nvme_requeue_req);
 
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, unsigned int flags)
@@ -133,6 +136,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
 	return req;
 }
+EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
 /*
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
@@ -170,6 +174,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 {
 	return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
 }
+EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
 int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void __user *ubuffer, unsigned bufflen,
@@ -385,6 +390,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
 	*count = min(*count, nr_io_queues);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
@@ -794,6 +800,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
 		return ret;
 	return nvme_wait_ready(ctrl, cap, false);
 }
+EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
 
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
 {
@@ -825,6 +832,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
 		return ret;
 	return nvme_wait_ready(ctrl, cap, true);
 }
+EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
 
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -855,6 +863,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
 
 /*
  * Initialize the cached copies of the Identify data and various controller
@@ -916,6 +925,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	kfree(id);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nvme_init_identify);
 
 static int nvme_dev_open(struct inode *inode, struct file *file)
 {
@@ -1321,6 +1331,7 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
 	mutex_unlock(&ctrl->namespaces_mutex);
 	kfree(id);
 }
+EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
@@ -1331,6 +1342,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 		nvme_ns_remove(ns);
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
+EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
 static DEFINE_IDA(nvme_instance_ida);
 
@@ -1362,13 +1374,14 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
 }
 
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
- {
+{
 	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
 
 	spin_lock(&dev_list_lock);
 	list_del(&ctrl->node);
 	spin_unlock(&dev_list_lock);
 }
+EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
 
 static void nvme_free_ctrl(struct kref *kref)
 {
@@ -1384,6 +1397,7 @@ void nvme_put_ctrl(struct nvme_ctrl *ctrl)
 {
 	kref_put(&ctrl->kref, nvme_free_ctrl);
 }
+EXPORT_SYMBOL_GPL(nvme_put_ctrl);
 
 /*
  * Initialize a NVMe controller structures.  This needs to be called during
@@ -1426,6 +1440,7 @@ out_release_instance:
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
@@ -1442,6 +1457,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
+EXPORT_SYMBOL_GPL(nvme_stop_queues);
 
 void nvme_start_queues(struct nvme_ctrl *ctrl)
 {
@@ -1455,6 +1471,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
+EXPORT_SYMBOL_GPL(nvme_start_queues);
 
 int __init nvme_core_init(void)
 {
@@ -1494,3 +1511,8 @@ void nvme_core_exit(void)
 	class_destroy(nvme_class);
 	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 }
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+module_init(nvme_core_init);
+module_exit(nvme_core_exit);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 54e79c035913..fec747917690 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2230,26 +2230,15 @@ static int __init nvme_init(void)
 	if (!nvme_workq)
 		return -ENOMEM;
 
-	result = nvme_core_init();
-	if (result < 0)
-		goto kill_workq;
-
 	result = pci_register_driver(&nvme_driver);
 	if (result)
-		goto core_exit;
-	return 0;
-
- core_exit:
-	nvme_core_exit();
- kill_workq:
-	destroy_workqueue(nvme_workq);
+		destroy_workqueue(nvme_workq);
 	return result;
 }
 
 static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
-	nvme_core_exit();
 	destroy_workqueue(nvme_workq);
 	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 	_nvme_check_size();
--
cgit v1.2.3

From 9396dec916c052855dbb5b876c13d163df397319 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 29 Feb 2016 15:59:44 +0100
Subject: nvme: use a work item to submit async event requests

Use a dedicated work item to submit async event requests instead of the
global kthread.  This simplifies the code and reduces the latencies to
resubmit a request once an event notification happened.

Signed-off-by: Christoph Hellwig
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Reviewed-by: Johannes Thumshirn
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/pci.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fec747917690..21b0be480fa5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -100,6 +100,7 @@ struct nvme_dev {
 	struct work_struct reset_work;
 	struct work_struct scan_work;
 	struct work_struct remove_work;
+	struct work_struct async_work;
 	struct mutex shutdown_lock;
 	bool subsystem;
 	void __iomem *cmb;
@@ -281,8 +282,11 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
 	u16 status = le16_to_cpu(cqe->status) >> 1;
 	u32 result = le32_to_cpu(cqe->result);
 
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
+	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
 		++dev->ctrl.event_limit;
+		queue_work(nvme_workq, &dev->async_work);
+	}
+
 	if (status != NVME_SC_SUCCESS)
 		return;
 
@@ -816,15 +820,22 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 	return 0;
 }
 
-static void nvme_submit_async_event(struct nvme_dev *dev)
+static void nvme_async_event_work(struct work_struct *work)
 {
+	struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+	struct nvme_queue *nvmeq = dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_admin_async_event;
-	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;
 
-	__nvme_submit_cmd(dev->queues[0], &c);
+	spin_lock_irq(&nvmeq->q_lock);
+	while (dev->ctrl.event_limit > 0) {
+		c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
+			--dev->ctrl.event_limit;
+		__nvme_submit_cmd(nvmeq, &c);
+	}
+	spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1358,9 +1369,6 @@ static int nvme_kthread(void *data)
 				continue;
 			spin_lock_irq(&nvmeq->q_lock);
 			nvme_process_cq(nvmeq);
-
-			while (i == 0 && dev->ctrl.event_limit > 0)
-				nvme_submit_async_event(dev);
 			spin_unlock_irq(&nvmeq->q_lock);
 		}
 	}
@@ -1929,6 +1937,7 @@ static void nvme_reset_work(struct work_struct *work)
 		goto free_tags;
 
 	dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+	queue_work(nvme_workq, &dev->async_work);
 
 	result = nvme_dev_list_add(dev);
 	if (result)
@@ -2062,6 +2071,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_WORK(&dev->scan_work, nvme_dev_scan);
 	INIT_WORK(&dev->reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+	INIT_WORK(&dev->async_work, nvme_async_event_work);
 	mutex_init(&dev->shutdown_lock);
 	init_completion(&dev->ioq_wait);
 
@@ -2115,6 +2125,7 @@ static void nvme_remove(struct pci_dev *pdev)
 	spin_unlock(&dev_list_lock);
 
 	pci_set_drvdata(pdev, NULL);
+	flush_work(&dev->async_work);
 	flush_work(&dev->reset_work);
 	flush_work(&dev->scan_work);
 	nvme_remove_namespaces(&dev->ctrl);
--
cgit v1.2.3

From 79f2b358c9ba373943a9284be2861fde58291c4e Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 29 Feb 2016 15:59:45 +0100
Subject: nvme: don't poll the CQ from the kthread

There is no reason to do unconditional polling of CQs per the NVMe
spec.

Signed-off-by: Christoph Hellwig
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Reviewed-by: Johannes Thumshirn
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/pci.c | 12 ------------
 1 file changed, 12 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 21b0be480fa5..10839f76179c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1156,9 +1156,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
 	dev->queues[qid] = nvmeq;
-
-	/* make sure queue descriptor is set before queue count, for kthread */
-	mb();
 	dev->queue_count++;
 
 	return nvmeq;
@@ -1345,7 +1342,6 @@ static int nvme_kthread(void *data)
 		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock(&dev_list_lock);
 		list_for_each_entry_safe(dev, next, &dev_list, node) {
-			int i;
 			u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
 			/*
@@ -1363,14 +1359,6 @@ static int nvme_kthread(void *data)
 				}
 				continue;
 			}
-			for (i = 0; i < dev->queue_count; i++) {
-				struct nvme_queue *nvmeq = dev->queues[i];
-				if (!nvmeq)
-					continue;
-				spin_lock_irq(&nvmeq->q_lock);
-				nvme_process_cq(nvmeq);
-				spin_unlock_irq(&nvmeq->q_lock);
-			}
 		}
 		spin_unlock(&dev_list_lock);
 		schedule_timeout(round_jiffies_relative(HZ));
--
cgit v1.2.3

From 2d55cd5f511d6fc377734473b237ac50820bfb9f Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 29 Feb 2016 15:59:46 +0100
Subject: nvme: replace the kthread with a per-device watchdog timer

The only work left in the kthread is the periodic health check for each
controller.  There is no need to run this from process context or keep
a thread context around for it, so replace it with a simpler timer.

Signed-off-by: Christoph Hellwig
Reviewed-by: Keith Busch
Reviewed-by: Sagi Grimberg
Reviewed-by: Johannes Thumshirn
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/pci.c | 112 ++++++++++--------------------------------------
 1 file changed, 23 insertions(+), 89 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 10839f76179c..a62336051178 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -27,7 +27,6 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kdev_t.h>
-#include <linux/kthread.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -39,6 +38,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/t10-pi.h>
+#include <linux/timer.h>
 #include <linux/types.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <asm/unaligned.h>
@@ -64,11 +64,7 @@ static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
-static LIST_HEAD(dev_list);
-static DEFINE_SPINLOCK(dev_list_lock);
-static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
-static wait_queue_head_t nvme_kthread_wait;
 
 struct nvme_dev;
 struct nvme_queue;
@@ -82,7 +78,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
 struct nvme_dev {
-	struct list_head node;
 	struct nvme_queue **queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
@@ -101,6 +96,7 @@ struct nvme_dev {
 	struct work_struct scan_work;
 	struct work_struct remove_work;
 	struct work_struct async_work;
+	struct timer_list watchdog_timer;
 	struct mutex shutdown_lock;
 	bool subsystem;
 	void __iomem *cmb;
@@ -1334,36 +1330,26 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	return result;
 }
 
-static int nvme_kthread(void *data)
+static void nvme_watchdog_timer(unsigned long data)
 {
-	struct nvme_dev *dev, *next;
+	struct nvme_dev *dev = (struct nvme_dev *)data;
+	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		spin_lock(&dev_list_lock);
-		list_for_each_entry_safe(dev, next, &dev_list, node) {
-			u32 csts = readl(dev->bar + NVME_REG_CSTS);
-
-			/*
-			 * Skip controllers currently under reset.
-			 */
-			if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
-				continue;
-
-			if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
-							csts & NVME_CSTS_CFS) {
-				if (queue_work(nvme_workq, &dev->reset_work)) {
-					dev_warn(dev->ctrl.device,
-						"Failed status: %x, reset controller\n",
-						readl(dev->bar + NVME_REG_CSTS));
-				}
-				continue;
-			}
+	/*
+	 * Skip controllers currently under reset.
+	 */
+	if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
+	    ((csts & NVME_CSTS_CFS) ||
+	     (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
+		if (queue_work(nvme_workq, &dev->reset_work)) {
+			dev_warn(dev->dev,
+				"Failed status: 0x%x, reset controller.\n",
+				csts);
 		}
-		spin_unlock(&dev_list_lock);
-		schedule_timeout(round_jiffies_relative(HZ));
+		return;
 	}
-	return 0;
+
+	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
 }
 
 static int nvme_create_io_queues(struct nvme_dev *dev)
@@ -1777,56 +1763,12 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
 	}
 }
 
-static int nvme_dev_list_add(struct nvme_dev *dev)
-{
-	bool start_thread = false;
-
-	spin_lock(&dev_list_lock);
-	if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
-		start_thread = true;
-		nvme_thread = NULL;
-	}
-	list_add(&dev->node, &dev_list);
-	spin_unlock(&dev_list_lock);
-
-	if (start_thread) {
-		nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-		wake_up_all(&nvme_kthread_wait);
-	} else
-		wait_event_killable(nvme_kthread_wait, nvme_thread);
-
-	if (IS_ERR_OR_NULL(nvme_thread))
-		return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
-
-	return 0;
-}
-
-/*
-* Remove the node from the device list and check
-* for whether or not we need to stop the nvme_thread.
-*/
-static void nvme_dev_list_remove(struct nvme_dev *dev)
-{
-	struct task_struct *tmp = NULL;
-
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
-		tmp = nvme_thread;
-		nvme_thread = NULL;
-	}
-	spin_unlock(&dev_list_lock);
-
-	if (tmp)
-		kthread_stop(tmp);
-}
-
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
 	int i;
 	u32 csts = -1;
 
-	nvme_dev_list_remove(dev);
+	del_timer_sync(&dev->watchdog_timer);
 
 	mutex_lock(&dev->shutdown_lock);
 	if (dev->bar) {
@@ -1927,9 +1869,7 @@ static void nvme_reset_work(struct work_struct *work)
 	dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
 	queue_work(nvme_workq, &dev->async_work);
 
-	result = nvme_dev_list_add(dev);
-	if (result)
-		goto remove;
+	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
 
 	/*
 	 * Keep the controller around but remove all namespaces if we don't have
@@ -1946,8 +1886,6 @@ static void nvme_reset_work(struct work_struct *work)
 	clear_bit(NVME_CTRL_RESETTING, &dev->flags);
 	return;
 
- remove:
-	nvme_dev_list_remove(dev);
  free_tags:
 	nvme_dev_remove_admin(dev);
 	blk_put_queue(dev->ctrl.admin_q);
@@ -2055,11 +1993,12 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev->dev = get_device(&pdev->dev);
 	pci_set_drvdata(pdev, dev);
 
-	INIT_LIST_HEAD(&dev->node);
 	INIT_WORK(&dev->scan_work, nvme_dev_scan);
 	INIT_WORK(&dev->reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
 	INIT_WORK(&dev->async_work, nvme_async_event_work);
+	setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
+		(unsigned long)dev);
 	mutex_init(&dev->shutdown_lock);
 	init_completion(&dev->ioq_wait);
 
@@ -2108,9 +2047,7 @@ static void nvme_remove(struct pci_dev *pdev)
 {
 	struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	del_timer_sync(&dev->watchdog_timer);
 
 	pci_set_drvdata(pdev, NULL);
 	flush_work(&dev->async_work);
@@ -2223,8 +2160,6 @@ static int __init nvme_init(void)
 {
 	int result;
 
-	init_waitqueue_head(&nvme_kthread_wait);
-
 	nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 	if (!nvme_workq)
 		return -ENOMEM;
@@ -2239,7 +2174,6 @@ static void __exit nvme_exit(void)
 {
 	pci_unregister_driver(&nvme_driver);
 	destroy_workqueue(nvme_workq);
-	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 	_nvme_check_size();
 }
--
cgit v1.2.3

From 1cb3cce5eb9de335330c8a147e47e3359a51a8b5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 29 Feb 2016 15:59:47 +0100
Subject: nvme: return the whole CQE through the request passthrough interface

Both LightNVM and NVMe over Fabrics need to look at more than just the
status and result field.

Signed-off-by: Christoph Hellwig
Reviewed-by: Matias Bjørling
Reviewed-by: Jay Freyensee
Reviewed-by: Sagi Grimberg
Signed-off-by: Sagi Grimberg
Reviewed-by: Keith Busch
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 27 +++++++++++++++++++--------
 drivers/nvme/host/nvme.h |  3 ++-
 drivers/nvme/host/pci.c  | 11 +++--------
 3 files changed, 24 insertions(+), 17 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 07b7ec699e92..66fd3d9e4d47 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -132,7 +132,6 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
 	req->cmd = (unsigned char *)cmd;
 	req->cmd_len = sizeof(struct nvme_command);
-	req->special = (void *)0;
 
 	return req;
 }
@@ -143,7 +142,8 @@ EXPORT_SYMBOL_GPL(nvme_alloc_request);
  * if the result is positive, it's an NVM Express status code
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
+		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		unsigned timeout)
 {
 	struct request *req;
 	int ret;
@@ -153,6 +153,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+	req->special = cqe;
 
 	if (buffer && bufflen) {
 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -161,8 +162,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	}
 
 	blk_execute_rq(req->q, NULL, req, 0);
-	if (result)
-		*result = (u32)(uintptr_t)req->special;
 	ret = req->errors;
  out:
 	blk_mq_free_request(req);
@@ -172,7 +171,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buffer, unsigned bufflen)
 {
-	return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
+	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -182,6 +181,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		u32 *result, unsigned timeout)
 {
 	bool write = cmd->common.opcode & 1;
+	struct nvme_completion cqe;
 	struct nvme_ns *ns = q->queuedata;
 	struct gendisk *disk = ns ? ns->disk : NULL;
 	struct request *req;
@@ -194,6 +194,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+	req->special = &cqe;
 
 	if (ubuffer && bufflen) {
 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -248,7 +249,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 	blk_execute_rq(req->q, disk, req, 0);
 	ret = req->errors;
 	if (result)
-		*result = (u32)(uintptr_t)req->special;
+		*result = le32_to_cpu(cqe.result);
 	if (meta && !ret && !write) {
 		if (copy_to_user(meta_buffer, meta, meta_len))
 			ret = -EFAULT;
@@ -329,6 +330,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
+	struct nvme_completion cqe;
+	int ret;
 
 	memset(&c, 0, sizeof(c));
 	c.features.opcode = nvme_admin_get_features;
@@ -336,13 +339,18 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+	if (ret >= 0)
+		*result = le32_to_cpu(cqe.result);
+	return ret;
 }
 
 int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
+	struct nvme_completion cqe;
+	int ret;
 
 	memset(&c, 0, sizeof(c));
 	c.features.opcode = nvme_admin_set_features;
@@ -350,7 +358,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+	if (ret >= 0)
+		*result = le32_to_cpu(cqe.result);
+	return ret;
 }
 
 int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 63ba8a500ee1..2ac7539fdd17 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -248,7 +248,8 @@ void nvme_requeue_req(struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		unsigned timeout);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void __user *ubuffer, unsigned bufflen, u32 *result,
 		unsigned timeout);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a62336051178..d47b08783110 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -748,10 +748,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		}
 
 		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-			u32 result = le32_to_cpu(cqe.result);
-			req->special = (void *)(uintptr_t)result;
-		}
+		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+			memcpy(req->special, &cqe, sizeof(cqe));
 		blk_mq_complete_request(req, status >> 1);
 
 	}
@@ -901,13 +899,10 @@ static void abort_endio(struct request *req, int error)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct nvme_queue *nvmeq = iod->nvmeq;
-	u32 result = (u32)(uintptr_t)req->special;
 	u16 status = req->errors;
 
-	dev_warn(nvmeq->dev->ctrl.device,
-		"Abort status:%x result:%x", status, result);
+	dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
-
 	blk_mq_free_request(req);
 }
--
cgit v1.2.3

From 931e1c2204c6d00c11c5c1e2e1c20b5ca41f292d Mon Sep 17 00:00:00 2001
From: Ming Lin
Date: Fri, 26 Feb 2016 13:24:19 -0800
Subject: nvme: expose cntlid in sysfs

For NVMe over Fabrics, the cntlid will be used by systemd/udev to
create a link to the device, for example,

/dev/disk/by-path/-- -> /dev/nvme0n1

Signed-off-by: Ming Lin
Reviewed-by: Keith Busch
Reviewed-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 20 ++++++++++++++++----
 drivers/nvme/host/nvme.h |  1 +
 2 files changed, 17 insertions(+), 4 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 66fd3d9e4d47..f08dccee8143 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -912,6 +912,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	ctrl->oncs = le16_to_cpup(&id->oncs);
 	atomic_set(&ctrl->abort_limit, id->acl + 1);
 	ctrl->vwc = id->vwc;
+	ctrl->cntlid = le16_to_cpup(&id->cntlid);
 	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
 	memcpy(ctrl->model, id->mn, sizeof(id->mn));
 	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
@@ -1099,7 +1100,7 @@ static const struct attribute_group nvme_ns_attr_group = {
 	.is_visible	= nvme_attrs_are_visible,
 };
 
-#define nvme_show_function(field)					\
+#define nvme_show_str_function(field)					\
 static ssize_t  field##_show(struct device *dev,			\
 		struct device_attribute *attr, char *buf)		\
 {									\
@@ -1108,15 +1109,26 @@ static ssize_t  field##_show(struct device *dev,			\
 }									\
 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
 
-nvme_show_function(model);
-nvme_show_function(serial);
-nvme_show_function(firmware_rev);
+#define nvme_show_int_function(field)					\
+static ssize_t  field##_show(struct device *dev,			\
+		struct device_attribute *attr, char *buf)		\
+{									\
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);			\
+	return sprintf(buf, "%d\n", ctrl->field);			\
+}									\
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_str_function(model);
+nvme_show_str_function(serial);
+nvme_show_str_function(firmware_rev);
+nvme_show_int_function(cntlid);
 
 static struct attribute *nvme_dev_attrs[] = {
 	&dev_attr_reset_controller.attr,
 	&dev_attr_model.attr,
 	&dev_attr_serial.attr,
 	&dev_attr_firmware_rev.attr,
+	&dev_attr_cntlid.attr,
 	NULL
 };
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2ac7539fdd17..9b71fa8c75e4 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -77,6 +77,7 @@ struct nvme_ctrl {
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
+	int cntlid;
 
 	u32 ctrl_config;
 
--
cgit v1.2.3

From d5bdec8ddb9f5fac3b351bed463a7132f6ba907b Mon Sep 17 00:00:00 2001
From: Matias Bjørling
Date: Fri, 19 Feb 2016 13:56:58 +0100
Subject: lightnvm: fold get bb tbl when using dual/quad plane mode
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When the media manager runs in dual or quad plane mode, lightnvm
abstracts away plane specific commands.  This poses a problem for get
bad block table, as it reports bad blocks per plane, making the table
either two or four times bigger than expected.  Fold the bad block list
before returning.

Signed-off-by: Matias Bjørling
Signed-off-by: Jens Axboe
---
 drivers/lightnvm/core.c      |  2 +-
 drivers/nvme/host/lightnvm.c | 46 +++++++++++++++++++++++++++++++++++++++-----
 include/linux/lightnvm.h     |  6 +++---
 3 files changed, 45 insertions(+), 9 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 782ac5d60a49..968ba7ed4158 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -250,7 +250,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 		return 0;
 	}
 
-	plane_cnt = (1 << dev->plane_mode);
+	plane_cnt = dev->plane_mode;
 	rqd->nr_pages = plane_cnt * nr_ppas;
 
 	if (dev->ops->max_phys_sect < rqd->nr_pages)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725e2fa4..d4f81f07f296 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -373,8 +373,31 @@ out:
 	return ret;
 }
 
+static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
+				int nr_dst_blks, u8 *dst_blks,
+				int nr_src_blks, u8 *src_blks)
+{
+	int blk, offset, pl, blktype;
+
+	for (blk = 0; blk < nr_dst_blks; blk++) {
+		offset = blk * nvmdev->plane_mode;
+		blktype = src_blks[offset];
+
+		/* Bad blocks on any planes take precedence over other types */
+		for (pl = 0; pl < nvmdev->plane_mode; pl++) {
+			if (src_blks[offset + pl] &
+					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+				blktype = src_blks[offset + pl];
+				break;
+			}
+		}
+
+		dst_blks[blk] = blktype;
+	}
+}
+
 static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
-				int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+				int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
 				void *priv)
 {
 	struct request_queue *q = nvmdev->q;
@@ -382,7 +405,9 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	struct nvme_nvm_command c = {};
 	struct nvme_nvm_bb_tbl *bb_tbl;
-	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
+	u8 *dst_blks = NULL;
+	int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
+	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
 	int ret = 0;
 
 	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -393,6 +418,12 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 	if (!bb_tbl)
 		return -ENOMEM;
 
+	dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
+	if (!dst_blks) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
 								bb_tbl, tblsz);
 	if (ret) {
@@ -414,16 +445,21 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 		goto out;
 	}
 
-	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+	if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
 		ret = -EINVAL;
 		dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
-					le32_to_cpu(bb_tbl->tblks), nr_blocks);
+					le32_to_cpu(bb_tbl->tblks), nr_src_blks);
 		goto out;
 	}
 
+	nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
+						nr_src_blks, bb_tbl->blk);
+
 	ppa = dev_to_generic_addr(nvmdev, ppa);
-	ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
+	ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
+
 out:
+	kfree(dst_blks);
 	kfree(bb_tbl);
 	return ret;
 }
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d6750111e48e..7fa1838f7356 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -92,9 +92,9 @@ enum {
 	NVM_ADDRMODE_CHANNEL	= 1,
 
 	/* Plane programming mode for LUN */
-	NVM_PLANE_SINGLE	= 0,
-	NVM_PLANE_DOUBLE	= 1,
-	NVM_PLANE_QUAD		= 2,
+	NVM_PLANE_SINGLE	= 1,
+	NVM_PLANE_DOUBLE	= 2,
+	NVM_PLANE_QUAD		= 4,
 
 	/* Status codes */
 	NVM_RSP_SUCCESS		= 0x0,
--
cgit v1.2.3

From 08095e70783f1d8296f858d37a9e1878f5da0623 Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Fri, 4 Mar 2016 13:15:17 -0700
Subject: NVMe: Create discard zero quirk white list

The NVMe specification does not require discarded blocks return zeroes
on read, but provides that behavior as a possibility.  Some applications
more efficiently use an SSD if reads on discarded blocks are
deterministically zero, based on the "discard_zeroes_data" queue
attribute.

There is no specification-defined way to determine device behavior on
discarded blocks, so the driver always left the queue setting disabled.
We can only know behavior based on individual device models, so this
patch adds a flag to the NVMe "quirk" list that vendors may set if they
know their controller works that way.  The patch also sets the new flag
for one such known device.

Signed-off-by: Keith Busch
Suggested-by: Artur Paszkiewicz
Reviewed-by: Christoph Hellwig
Reviewed-by: Martin K. Petersen
Reviewed-by: Johannes Thumshirn
Reviewed-by: Sagi Grimberg
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 8 +++++++-
 drivers/nvme/host/nvme.h | 6 ++++++
 drivers/nvme/host/pci.c  | 3 ++-
 3 files changed, 15 insertions(+), 2 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f08dccee8143..4304be00e556 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -582,8 +582,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
 
 static void nvme_config_discard(struct nvme_ns *ns)
 {
+	struct nvme_ctrl *ctrl = ns->ctrl;
 	u32 logical_block_size = queue_logical_block_size(ns->queue);
-	ns->queue->limits.discard_zeroes_data = 0;
+
+	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
+		ns->queue->limits.discard_zeroes_data = 1;
+	else
+		ns->queue->limits.discard_zeroes_data = 0;
+
 	ns->queue->limits.discard_alignment = logical_block_size;
 	ns->queue->limits.discard_granularity = logical_block_size;
 	blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9b71fa8c75e4..a402a0ebf471 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -59,6 +59,12 @@ enum nvme_quirks {
 	 * correctly.
 	 */
 	NVME_QUIRK_IDENTIFY_CNS		= (1 << 1),
+
+	/*
+	 * The controller deterministically returns 0's on reads to discarded
+	 * logical blocks.
+	 */
+	NVME_QUIRK_DISCARD_ZEROES	= (1 << 2),
 };
 
 struct nvme_ctrl {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d47b08783110..74514c767429 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2130,7 +2130,8 @@ static const struct pci_error_handlers nvme_err_handler = {
 
 static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0953),
-		.driver_data = NVME_QUIRK_STRIPE_SIZE, },
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DISCARD_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
--
cgit v1.2.3

From 48c7823f42da2bc881ae2e325ed40123871c2fb9 Mon Sep 17 00:00:00 2001
From: Jon Derrick
Date: Tue, 8 Mar 2016 10:34:54 -0700
Subject: NVMe: Remove unused sq_head read in completion path

Signed-off-by: Jon Derrick
Reviewed-by: Sagi Grimberg
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/pci.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 74514c767429..e9f18e1d73e5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -134,7 +134,6 @@ struct nvme_queue {
 	u32 __iomem *q_db;
 	u16 q_depth;
 	s16 cq_vector;
-	u16 sq_head;
 	u16 sq_tail;
 	u16 cq_head;
 	u16 qid;
@@ -719,7 +718,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 
 		if ((status & 1) != phase)
 			break;
-		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
 		if (++head == nvmeq->q_depth) {
 			head = 0;
 			phase = !phase;
--
cgit v1.2.3

From 118472ab8532e55f48395ef5764b354fe48b1d73 Mon Sep 17 00:00:00 2001
From: Keith Busch
Date: Thu, 18 Feb 2016 09:57:48 -0700
Subject: NVMe: Expose ns wwid through single sysfs entry

The method to uniquely identify a namespace depends on the controller's
specification revision level and implemented capabilities.  This patch
has the driver figure this out and exports the unique string through a
single 'wwid' attribute so the user doesn't have this burden.

The longest namespace unique identifier is used if available.  If not
available, the driver will concatenate the controller's vendor, serial,
and model with the namespace ID.  The specification provides this as a
unique identifier.

Signed-off-by: Keith Busch
Reviewed-by: Hannes Reinecke
Reviewed-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Signed-off-by: Jens Axboe
---
 drivers/nvme/host/core.c | 26 ++++++++++++++++++++++++++
 drivers/nvme/host/nvme.h |  1 +
 2 files changed, 27 insertions(+)

(limited to 'drivers/nvme')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4304be00e556..266918b9bb84 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -915,6 +915,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		return -EIO;
 	}
 
+	ctrl->vid = le16_to_cpu(id->vid);
 	ctrl->oncs = le16_to_cpup(&id->oncs);
 	atomic_set(&ctrl->abort_limit, id->acl + 1);
 	ctrl->vwc = id->vwc;
@@ -1053,6 +1054,30 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
 }
 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
 
+static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
+								char *buf)
+{
+	struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+	struct nvme_ctrl *ctrl = ns->ctrl;
+	int serial_len = sizeof(ctrl->serial);
+	int model_len = sizeof(ctrl->model);
+
+	if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+		return sprintf(buf, "eui.%16phN\n", ns->uuid);
+
+	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+		return sprintf(buf, "eui.%8phN\n", ns->eui);
+
+	while (ctrl->serial[serial_len - 1] == ' ')
+		serial_len--;
+	while (ctrl->model[model_len - 1] == ' ')
+		model_len--;
+
+	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
+		serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
+}
+static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
+
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
 								char *buf)
 {
@@ -1078,6 +1103,7 @@ static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
 
 static struct attribute *nvme_ns_attrs[] = {
+	&dev_attr_wwid.attr,
 	&dev_attr_uuid.attr,
 	&dev_attr_eui.attr,
 	&dev_attr_nsid.attr,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a402a0ebf471..bf3f143e975b 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -91,6 +91,7 @@ struct nvme_ctrl {
 	u32 max_hw_sectors;
 	u32 stripe_size;
 	u16 oncs;
+	u16 vid;
 	atomic_t abort_limit;
 	u8 event_limit;
 	u8 vwc;
--
cgit v1.2.3
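
A recurring pattern in this series is the one from "nvme/host: reference
the fabric module for each bdev open callout": whenever an object is
reached through an ops table that may live in a separately unloadable
module, each open must pin both the object (kref) and the module, and
release them in reverse order.  The following is a minimal sketch of that
pattern, not code from the patches above; the foo_* names are hypothetical
and stand in for any driver-defined namespace object and ops table.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical ops table; a real driver sets .module = THIS_MODULE. */
struct foo_ops {
	struct module *module;
};

struct foo_ns {
	struct kref kref;
	const struct foo_ops *ops;
};

static void foo_free_ns(struct kref *kref)
{
	kfree(container_of(kref, struct foo_ns, kref));
}

/* Open path: pin the object first, then the module implementing its ops. */
static struct foo_ns *foo_get_ns(struct foo_ns *ns)
{
	if (!kref_get_unless_zero(&ns->kref))
		return NULL;
	if (!try_module_get(ns->ops->module)) {
		/* The module is unloading; drop the object reference again. */
		kref_put(&ns->kref, foo_free_ns);
		return NULL;
	}
	return ns;
}

/* Release path: drop the references in reverse order of acquisition. */
static void foo_put_ns(struct foo_ns *ns)
{
	module_put(ns->ops->module);
	kref_put(&ns->kref, foo_free_ns);
}

Taking the kref before try_module_get() matters: the module reference
only guards the code (the ops callbacks), while the kref guards the data,
and a failed try_module_get() must be able to unwind without calling back
into the module being unloaded.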