From 1d800f32b2574c1d055984ad17223198caddbb54 Mon Sep 17 00:00:00 2001
From: Jonathan Derrick
Date: Mon, 3 Oct 2022 14:25:11 -0600
Subject: MAINTAINERS: Update SED-Opal Maintainers

Add my new email address and remove Revanth.

Signed-off-by: Jonathan Derrick
Link: https://lore.kernel.org/r/20221003202511.5124-1-jonathan.derrick@linux.dev
Signed-off-by: Jens Axboe
---
 MAINTAINERS | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 47f27eea29ba..f9f5184e7e74 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -18298,8 +18298,7 @@ S:	Maintained
 F:	drivers/mmc/host/sdhci-esdhc-imx.c
 
 SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
-M:	Jonathan Derrick
-M:	Revanth Rajashekar
+M:	Jonathan Derrick
 L:	linux-block@vger.kernel.org
 S:	Supported
 F:	block/opal_proto.h
--
cgit v1.2.3


From 340e134727c9adaefadc7e79b765c038e18e55c3 Mon Sep 17 00:00:00 2001
From: Deming Wang
Date: Thu, 6 Oct 2022 04:44:50 -0400
Subject: block: Remove the repeat word 'can'

Remove the repeated word 'can' from the comments of bio_kmalloc.

Signed-off-by: Deming Wang
Link: https://lore.kernel.org/r/20221006084450.1513-1-wangdeming@inspur.com
Signed-off-by: Jens Axboe
---
 block/bio.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/bio.c b/block/bio.c
index 7cb7d2ff139b..6c470a50a36d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -567,7 +567,7 @@ EXPORT_SYMBOL(bio_alloc_bioset);
  * be reused by calling bio_uninit() before calling bio_init() again.
  *
  * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this
- * function are not backed by a mempool can can fail. Do not use this function
+ * function are not backed by a mempool can fail. Do not use this function
  * for allocations in the file system I/O path.
  *
  * Returns: Pointer to new bio on success, NULL on failure.
--
cgit v1.2.3


From 285febabac4a16655372d23ff43e89ff6f216691 Mon Sep 17 00:00:00 2001
From: Yu Kuai
Date: Sun, 9 Oct 2022 18:10:38 +0800
Subject: blk-wbt: fix that 'rwb->wc' is always set to 1 in wbt_init()

commit 8c5035dfbb94 ("blk-wbt: call rq_qos_add() after wb_normal is
initialized") moves wbt_set_write_cache() before rq_qos_add(), which is
wrong because wbt_rq_qos() is still NULL.

Fix the problem by removing wbt_set_write_cache() and setting 'rwb->wc'
directly. Note that this patch also removes the redundant setting of
'rwb->wc'.

Fixes: 8c5035dfbb94 ("blk-wbt: call rq_qos_add() after wb_normal is initialized")
Reported-by: kernel test robot
Link: https://lore.kernel.org/r/202210081045.77ddf59b-yujie.liu@intel.com
Signed-off-by: Yu Kuai
Reviewed-by: Ming Lei
Link: https://lore.kernel.org/r/20221009101038.1692875-1-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe
---
 block/blk-wbt.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 246467926253..c293e08b301f 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -841,12 +841,11 @@ int wbt_init(struct request_queue *q)
 	rwb->last_comp = rwb->last_issue = jiffies;
 	rwb->win_nsec = RWB_WINDOW_NSEC;
 	rwb->enable_state = WBT_STATE_ON_DEFAULT;
-	rwb->wc = 1;
+	rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
 	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
 
 	wbt_queue_depth_changed(&rwb->rqos);
-	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 
 	/*
 	 * Assign rwb and add the stats callback.
--
cgit v1.2.3
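The ordering problem the blk-wbt patch above fixes is easiest to see from the shape of the setter it removes. A simplified sketch, paraphrased from blk-wbt rather than quoted verbatim (it assumes the kernel-internal rq_qos/RQWB helpers):

static void wbt_set_write_cache_sketch(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);	/* NULL until rq_qos_add() has run */

	/*
	 * Called before rq_qos_add(), this silently does nothing and
	 * rwb->wc keeps whatever wbt_init() assigned.
	 */
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

Setting rwb->wc directly in wbt_init(), as the patch does, avoids depending on the registration order entirely.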
From ca5eebda3e1c1a58a1c5a337da393ed6734593e3 Mon Sep 17 00:00:00 2001
From: Brian Foster
Date: Mon, 3 Oct 2022 09:35:34 -0400
Subject: block: avoid sign extend problem with default queue flags mask

request_queue->queue_flags is unsigned long, which is 8 bytes on 64-bit
architectures. Most queue flag modifications occur through bit field
helpers, but default flags can be logically OR'd via the
QUEUE_FLAG_MQ_DEFAULT mask. If this mask happens to include bit 31, the
assignment can sign extend the field and set all upper 32 bits. This
exact problem has been observed on a downstream kernel that happens to
use bit 31 for QUEUE_FLAG_NOWAIT.

This is not an immediate problem for current upstream because bit 31 is
not included in the default flag assignment (and is not used at all,
actually). Regardless, fix up the QUEUE_FLAG_MQ_DEFAULT mask definition
to avoid the landmine in the future.

Signed-off-by: Brian Foster
Reviewed-by: Christoph Hellwig
Link: https://lore.kernel.org/r/20221003133534.1075582-1-bfoster@redhat.com
Signed-off-by: Jens Axboe
---
 include/linux/blkdev.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49373d002631..e4fb47753177 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -580,9 +580,9 @@ struct request_queue {
 #define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
 #define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
 
-#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_SAME_COMP) |		\
-				 (1 << QUEUE_FLAG_NOWAIT))
+#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
+				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
+				 (1UL << QUEUE_FLAG_NOWAIT))
 
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
--
cgit v1.2.3


From a0a6314ae774f8a5e52a599946aa2ad0db867b83 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 10 Oct 2022 15:18:57 +0200
Subject: block: fix leaking minors of hidden disks

The major/minor of a hidden gendisk is not propagated to the block
device because it is never registered using bdev_add. But the lack of
bd_dev also causes the dynamic major/minor number not to be freed.

Assign bd_dev manually to ensure the dynamic major/minor gets freed.

Based on a patch by Keith Busch.

Fixes: 8ddcd653257c ("block: introduce GENHD_FL_HIDDEN")
Reported-by: Daniel Wagner
Signed-off-by: Christoph Hellwig
Tested-by: Daniel Wagner
Reviewed-by: Keith Busch
Link: https://lore.kernel.org/r/20221010131857.748129-1-hch@lst.de
Signed-off-by: Jens Axboe
---
 block/genhd.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/block/genhd.c b/block/genhd.c
index d6a21803a57e..dc9b61dfb692 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -507,6 +507,13 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 		 */
 		dev_set_uevent_suppress(ddev, 0);
 		disk_uevent(disk, KOBJ_ADD);
+	} else {
+		/*
+		 * Even if the block_device for a hidden gendisk is not
+		 * registered, it needs to have a valid bd_dev so that the
+		 * freeing of the dynamic major works.
+		 */
+		disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor);
 	}
 
 	disk_update_readahead(disk);
--
cgit v1.2.3
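The sign-extension hazard described in the QUEUE_FLAG_MQ_DEFAULT patch above can be reproduced with a few lines of standalone userspace C (a demonstration of the C arithmetic only, not kernel code):

#include <stdio.h>

int main(void)
{
	/*
	 * "1 << 31" is a plain int, so it ends up negative on a typical
	 * 64-bit build (strictly speaking, shifting into the sign bit is
	 * undefined behaviour); converting it to unsigned long then fills
	 * the upper 32 bits.
	 */
	unsigned long bad  = 1 << 31;
	unsigned long good = 1UL << 31;

	printf("1 << 31   -> %#lx\n", bad);	/* 0xffffffff80000000 */
	printf("1UL << 31 -> %#lx\n", good);	/* 0x80000000 */
	return 0;
}

Using 1UL keeps the whole shift in unsigned long arithmetic, which is why the mask definition above switches every term.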
From a1ae8d4d9be0178132df7c4931a1ba77d0e76039 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 28 Sep 2022 09:23:26 +0300
Subject: nvme-rdma: fix possible hang caused during ctrl deletion

When we delete a controller, we execute the following:

1. nvme_stop_ctrl() - stop some work elements that may be inflight or
   scheduled (specifically also .stop_ctrl which cancels ctrl error
   recovery work)
2. nvme_remove_namespaces() - which first flushes scan_work to avoid
   competing ns addition/removal
3. continue to teardown the controller

However, if err_work was scheduled to run in (1), it is designed to
cancel any inflight I/O, particularly I/O that is originating from ns
scan_work in (2), but because it is cancelled in .stop_ctrl(), we can
prevent forward progress of (2) as ns scanning is blocking on I/O
(that will never be cancelled).

The race is:
1. transport layer error observed -> err_work is scheduled
2. scan_work executes, discovers ns, generates I/O to it
3. nvme_stop_ctrl() -> .stop_ctrl() -> cancel_work_sync(err_work) -
   err_work never executed
4. nvme_remove_namespaces() -> flush_work(scan_work)
--> deadlock, because scan_work is blocked on I/O that was supposed to
be cancelled by err_work, but was cancelled before executing.

Fix this by flushing err_work instead of cancelling it, to force it to
execute and cancel all inflight I/O.

Fixes: b435ecea2a4d ("nvme: Add .stop_ctrl to nvme ctrl ops")
Fixes: f6c8e432cb04 ("nvme: flush namespace scanning work just before removing namespaces")
Signed-off-by: Sagi Grimberg
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/rdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5ad0ab2853a4..6e079abb22ee 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -996,7 +996,7 @@ static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
 
-	cancel_work_sync(&ctrl->err_work);
+	flush_work(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->reconnect_work);
 }
--
cgit v1.2.3
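Both this fix and the nvme-tcp patch that follows rely on the same workqueue distinction: cancel_work_sync() discards a queued-but-not-yet-running work item, while flush_work() runs it to completion. A minimal sketch with made-up names (not the driver code):

#include <linux/workqueue.h>

static void err_handler(struct work_struct *work)
{
	/* tears down queues and cancels inflight I/O, unblocking scan_work */
}

static DECLARE_WORK(err_work, err_handler);

static void stop_ctrl_sketch(void)
{
	/*
	 * cancel_work_sync(&err_work) would drop a pending err_work without
	 * ever running it, so the I/O it was meant to cancel stays blocked.
	 * flush_work(&err_work) instead executes any queued instance to
	 * completion before returning.
	 */
	flush_work(&err_work);
}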
From c4abd8757189c7ca5803828f9c892328d7d94943 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Wed, 28 Sep 2022 09:23:25 +0300
Subject: nvme-tcp: fix possible hang caused during ctrl deletion

When we delete a controller, we execute the following:

1. nvme_stop_ctrl() - stop some work elements that may be inflight or
   scheduled (specifically also .stop_ctrl which cancels ctrl error
   recovery work)
2. nvme_remove_namespaces() - which first flushes scan_work to avoid
   competing ns addition/removal
3. continue to teardown the controller

However, if err_work was scheduled to run in (1), it is designed to
cancel any inflight I/O, particularly I/O that is originating from ns
scan_work in (2), but because it is cancelled in .stop_ctrl(), we can
prevent forward progress of (2) as ns scanning is blocking on I/O
(that will never be cancelled).

The race is:
1. transport layer error observed -> err_work is scheduled
2. scan_work executes, discovers ns, generates I/O to it
3. nvme_stop_ctrl() -> .stop_ctrl() -> cancel_work_sync(err_work) -
   err_work never executed
4. nvme_remove_namespaces() -> flush_work(scan_work)
--> deadlock, because scan_work is blocked on I/O that was supposed to
be cancelled by err_work, but was cancelled before executing (see
stack trace [1]).

Fix this by flushing err_work instead of cancelling it, to force it to
execute and cancel all inflight I/O.

[1]:
--
Call Trace:
 __schedule+0x390/0x910
 ? scan_shadow_nodes+0x40/0x40
 schedule+0x55/0xe0
 io_schedule+0x16/0x40
 do_read_cache_page+0x55d/0x850
 ? __page_cache_alloc+0x90/0x90
 read_cache_page+0x12/0x20
 read_part_sector+0x3f/0x110
 amiga_partition+0x3d/0x3e0
 ? osf_partition+0x33/0x220
 ? put_partition+0x90/0x90
 bdev_disk_changed+0x1fe/0x4d0
 blkdev_get_whole+0x7b/0x90
 blkdev_get_by_dev+0xda/0x2d0
 device_add_disk+0x356/0x3b0
 nvme_mpath_set_live+0x13c/0x1a0 [nvme_core]
 ? nvme_parse_ana_log+0xae/0x1a0 [nvme_core]
 nvme_update_ns_ana_state+0x3a/0x40 [nvme_core]
 nvme_mpath_add_disk+0x120/0x160 [nvme_core]
 nvme_alloc_ns+0x594/0xa00 [nvme_core]
 nvme_validate_or_alloc_ns+0xb9/0x1a0 [nvme_core]
 ? __nvme_submit_sync_cmd+0x1d2/0x210 [nvme_core]
 nvme_scan_work+0x281/0x410 [nvme_core]
 process_one_work+0x1be/0x380
 worker_thread+0x37/0x3b0
 ? process_one_work+0x380/0x380
 kthread+0x12d/0x150
 ? set_kthread_struct+0x50/0x50
 ret_from_fork+0x1f/0x30
INFO: task nvme:6725 blocked for more than 491 seconds.
      Not tainted 5.15.65-f0.el7.x86_64 #1
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:nvme            state:D stack:    0 pid: 6725 ppid:  1761 flags:0x00004000
Call Trace:
 __schedule+0x390/0x910
 ? sched_clock+0x9/0x10
 schedule+0x55/0xe0
 schedule_timeout+0x24b/0x2e0
 ? try_to_wake_up+0x358/0x510
 ? finish_task_switch+0x88/0x2c0
 wait_for_completion+0xa5/0x110
 __flush_work+0x144/0x210
 ? worker_attach_to_pool+0xc0/0xc0
 flush_work+0x10/0x20
 nvme_remove_namespaces+0x41/0xf0 [nvme_core]
 nvme_do_delete_ctrl+0x47/0x66 [nvme_core]
 nvme_sysfs_delete.cold.96+0x8/0xd [nvme_core]
 dev_attr_store+0x14/0x30
 sysfs_kf_write+0x38/0x50
 kernfs_fop_write_iter+0x146/0x1d0
 new_sync_write+0x114/0x1b0
 ? intel_pmu_handle_irq+0xe0/0x420
 vfs_write+0x18d/0x270
 ksys_write+0x61/0xe0
 __x64_sys_write+0x1a/0x20
 do_syscall_64+0x37/0x90
 entry_SYSCALL_64_after_hwframe+0x61/0xcb
--

Fixes: 3f2304f8c6d6 ("nvme-tcp: add NVMe over TCP host driver")
Reported-by: Jonathan Nicklin
Signed-off-by: Sagi Grimberg
Tested-by: Jonathan Nicklin
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/tcp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 93e2e313fa70..1eed0fc26b3a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2181,7 +2181,7 @@ out_fail:
 
 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
 {
-	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+	flush_work(&to_tcp_ctrl(ctrl)->err_work);
 	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
 }
--
cgit v1.2.3


From 80b2624094c8d369a3c6eab515e8f1564d2e5db2 Mon Sep 17 00:00:00 2001
From: Abhijit
Date: Mon, 10 Oct 2022 10:30:05 +0200
Subject: nvme-pci: add NVME_QUIRK_BOGUS_NID for Lexar NM760

Add a quirk to fix Lexar NM760 SSD drives reporting duplicate nsids.

Signed-off-by: Abhijit
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5b796efa325b..fd2c0231d2e2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3527,6 +3527,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
--
cgit v1.2.3
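For context, this is roughly how an nvme_id_table entry like the one added above (and the ZHITAI one below) takes effect; a simplified sketch of the probe path with hypothetical names (sketch_id_table, sketch_probe, do_rest_of_probe), not the actual pci.c code:

static const struct pci_device_id sketch_id_table[] = {
	{ PCI_DEVICE(0x1d97, 0x2269),	/* Lexar NM760 */
		.driver_data = NVME_QUIRK_BOGUS_NID, },
	{ 0, }
};

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* the matching table entry's driver_data word carries the quirk bits */
	unsigned long quirks = id->driver_data;

	/*
	 * The real probe hands the quirks to nvme_init_ctrl(), which stores
	 * them in ctrl->quirks; NVME_QUIRK_BOGUS_NID then tells the core not
	 * to trust the (duplicate) namespace identifiers the device reports.
	 */
	return do_rest_of_probe(pdev, quirks);	/* hypothetical helper */
}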
From d5d3c100ac40dcb03959a6f1d2f0f13204c4f145 Mon Sep 17 00:00:00 2001
From: Xi Ruoyao
Date: Wed, 28 Sep 2022 17:39:13 +0800
Subject: nvme-pci: avoid the deepest sleep state on ZHITAI TiPro5000 SSDs

ZHITAI TiPro5000 SSDs have the same APST sleep problem as their cousin,
the TiPro7000. The quirk for the TiPro7000 has been added in commit
6b961bce50e4 ("nvme-pci: avoid the deepest sleep state on ZHITAI
TiPro7000 SSDs"); use the same quirk for the TiPro5000.

The APST data from "nvme id-ctrl /dev/nvme1":

vid       : 0x1e49
ssvid     : 0x1e49
sn        : ZTA21T0KA2227304LM
mn        : ZHITAI TiPlus5000 1TB
fr        : ZTA09139
[...]
ps    0 : mp:6.50W operational enlat:0 exlat:0 rrt:0 rrl:0
          rwt:0 rwl:0 idle_power:- active_power:-
ps    1 : mp:5.80W operational enlat:0 exlat:0 rrt:1 rrl:1
          rwt:1 rwl:1 idle_power:- active_power:-
ps    2 : mp:3.60W operational enlat:0 exlat:0 rrt:2 rrl:2
          rwt:2 rwl:2 idle_power:- active_power:-
ps    3 : mp:0.0500W non-operational enlat:5000 exlat:10000 rrt:3 rrl:3
          rwt:3 rwl:3 idle_power:- active_power:-
ps    4 : mp:0.0025W non-operational enlat:8000 exlat:45000 rrt:4 rrl:4
          rwt:4 rwl:4 idle_power:- active_power:-

Reported-and-tested-by: Chang Feng
Signed-off-by: Xi Ruoyao
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fd2c0231d2e2..bcbef6bc5672 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3521,6 +3521,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+	{ PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
+		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
--
cgit v1.2.3


From 72e3b8883a36e80ebfa41015c7b6926ce31ace05 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 29 Sep 2022 10:36:47 +0300
Subject: nvme-multipath: fix possible hang in live ns resize with ANA access

When we revalidate paths as part of ns size change (as of commit
e7d65803e2bb), it is possible that during the path revalidation the
only paths that are IO capable (i.e. optimized/non-optimized) are the
ones whose ns resize was not yet communicated to the host, which will
cause inflight requests to be requeued (as we have available paths but
none are IO capable). These requests on the requeue list are waiting
for someone to resubmit them at some point.

The IO capable paths will eventually notify the ns resize change to the
host, but there is nothing that will kick the requeue list to resubmit
the queued requests.

Fix this by always kicking the requeue list, and if no IO capable path
exists, these requests will be queued again.

A typical log that indicates that IOs are requeued:
--
nvme nvme1: creating 4 I/O queues.
nvme nvme1: new ctrl: "testnqn1"
nvme nvme2: creating 4 I/O queues.
nvme nvme2: mapped 4/0/0 default/read/poll queues.
nvme nvme2: new ctrl: NQN "testnqn1", addr 127.0.0.1:8009
nvme nvme1: rescanning namespaces.
nvme1n1: detected capacity change from 2097152 to 4194304
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
block nvme1n1: no usable path - requeuing I/O
nvme nvme2: rescanning namespaces.
--

Reported-by: Yogev Cohen
Fixes: e7d65803e2bb ("nvme-multipath: revalidate paths during rescan")
Signed-off-by: Sagi Grimberg
Cc: # v5.15+
Signed-off-by: Christoph Hellwig
---
 drivers/nvme/host/multipath.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 00f2f81e20fa..0ea7e441e080 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -182,6 +182,7 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
 
 	for_each_node(node)
 		rcu_assign_pointer(head->current_path[node], NULL);
+	kblockd_schedule_work(&head->requeue_work);
 }
 
 static bool nvme_path_is_disabled(struct nvme_ns *ns)
--
cgit v1.2.3
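The one-line kick above works because the multipath requeue handler simply resubmits everything on the requeue list; if no IO-capable path exists yet, the bios land right back on the list and wait for the next kick. A condensed sketch of that handler (simplified from the multipath code, not verbatim):

static void nvme_requeue_work_sketch(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;
		/*
		 * Re-enters the multipath submit path, which either picks a
		 * usable path or requeues the bio again.
		 */
		submit_bio_noacct(bio);
	}
}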