path: root/drivers/nvme/host/nvme.h
author    Tao Chiu <taochiu@synology.com>    2021-04-26 10:53:10 +0800
committer Christoph Hellwig <hch@lst.de>    2021-05-04 09:35:49 +0200
commit    a97157440e1e69c35d7804d3b72da0c626ef28e6 (patch)
tree      b7dce4a25cb6a7388d3218a9c399d19fe97d4218 /drivers/nvme/host/nvme.h
parent    51ad06cd698cb9ff280a769ed8d57210a1d2266d (diff)
download  linux-a97157440e1e69c35d7804d3b72da0c626ef28e6.tar.bz2
nvme: move the fabrics queue ready check routines to core
queue_rq() in pci only checks whether the dispatched queue (nvmeq) is ready, e.g. not being suspended. Since nvme_alloc_admin_tags() in the reset flow restarts the admin queue, users are able to submit admin commands to a controller before reset_work() completes. Commands submitted under this condition may interfere with the identify and I/O queue setup commands issued by reset_work(), and may result in the hang described in the following patch.

As seen in the fabrics drivers, user commands are prevented from being executed under improper controller states. We can reuse this logic to keep the admin queue clear during reset_work().

Signed-off-by: Tao Chiu <taochiu@synology.com>
Signed-off-by: Cody Wong <codywong@synology.com>
Reviewed-by: Leon Chien <leonchien@synology.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/nvme/host/nvme.h')
-rw-r--r--    drivers/nvme/host/nvme.h    15
1 file changed, 15 insertions, 0 deletions
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c1e086a0bc3f..05f31a2c64bb 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -638,6 +638,21 @@ struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
+blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *req);
+bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live);
+
+static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
+ bool queue_live)
+{
+ if (likely(ctrl->state == NVME_CTRL_LIVE))
+ return true;
+ if (ctrl->ops->flags & NVME_F_FABRICS &&
+ ctrl->state == NVME_CTRL_DELETING)
+ return true;
+ return __nvme_check_ready(ctrl, rq, queue_live);
+}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
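For context, a later patch in this series wires these helpers into the PCI driver's queue_rq() path. The sketch below is illustrative rather than the verbatim merged change: the surrounding structure fields and the trailing submission logic are abbreviated, and only the ready-check pattern using nvme_check_ready() and nvme_fail_nonready_command() is taken from the interfaces declared above.

/*
 * Illustrative sketch: how a transport's queue_rq() can use the helpers
 * declared above.  nvme_check_ready() returns true immediately for a LIVE
 * controller, so the check is cheap on the hot path; otherwise the request
 * is failed or requeued by nvme_fail_nonready_command().
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;

	/* Reject or requeue requests while the controller is not ready. */
	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	/* ... existing per-queue checks and command submission ... */
	return BLK_STS_OK;
}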