Diffstat (limited to 'drivers/nvme/host/fabrics.c')
-rw-r--r--  drivers/nvme/host/fabrics.c | 83
1 file changed, 82 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 8f0f34d06d46..124c458806df 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -536,6 +536,85 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 	return NULL;
 }
 
+blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
+		bool queue_live, bool is_connected)
+{
+	struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+	if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
+		return BLK_STS_OK;
+
+	switch (ctrl->state) {
+	case NVME_CTRL_DELETING:
+		goto reject_io;
+
+	case NVME_CTRL_NEW:
+	case NVME_CTRL_CONNECTING:
+		if (!is_connected)
+			/*
+			 * This is the case of starting a new
+			 * association but connectivity was lost
+			 * before it was fully created. We need to
+			 * error the commands used to initialize the
+			 * controller so the reconnect can go into a
+			 * retry attempt. The commands should all be
+			 * marked REQ_FAILFAST_DRIVER, which will hit
+			 * the reject path below. Anything else will
+			 * be queued while the state settles.
+			 */
+			goto reject_or_queue_io;
+
+		if ((queue_live &&
+		     !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
+		    (!queue_live && blk_rq_is_passthrough(rq) &&
+		     cmd->common.opcode == nvme_fabrics_command &&
+		     cmd->fabrics.fctype == nvme_fabrics_type_connect))
+			/*
+			 * If the queue is live, allow only internally
+			 * generated commands to pass through. These
+			 * are commands on the admin queue to initialize
+			 * the controller. This will reject any ioctl
+			 * admin cmds received while initializing.
+			 *
+			 * If the queue is not live, allow only a
+			 * connect command. This will reject any ioctl
+			 * admin cmd as well as initialization commands
+			 * if the controller reverted the queue to non-live.
+			 */
+			return BLK_STS_OK;
+
+		/*
+		 * fall-thru to the reject_or_queue_io clause
+		 */
+		break;
+
+	/* these cases fall-thru
+	 * case NVME_CTRL_LIVE:
+	 * case NVME_CTRL_RESETTING:
+	 */
+	default:
+		break;
+	}
+
+reject_or_queue_io:
+	/*
+	 * Any other new io is something we're not in a state to send
+	 * to the device. Default action is to busy it and retry it
+	 * after the controller state is recovered. However, anything
+	 * marked for failfast or nvme multipath is immediately failed.
+	 * Note: commands used to initialize the controller will be
+	 * marked for failfast.
+	 * Note: nvme cli/ioctl commands are marked for failfast.
+	 */
+	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+		return BLK_STS_RESOURCE;
+
+reject_io:
+	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+	return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
+
 static const match_table_t opt_tokens = {
 	{ NVMF_OPT_TRANSPORT,	"transport=%s" },
 	{ NVMF_OPT_TRADDR,	"traddr=%s" },
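
For context: nvmf_check_if_ready() is meant to be called at the top of each
fabrics transport's blk-mq .queue_rq handler, before the command is put on the
wire. A minimal caller sketch follows; the demo_* names and the queue-state
bookkeeping are hypothetical stand-ins for whatever state a real transport
(FC, RDMA, loop) keeps, and only the nvmf_check_if_ready() call itself comes
from this patch:

/* Hypothetical caller sketch -- not part of this patch. */
static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct demo_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	blk_status_t ret;

	/*
	 * Gate the request on controller/queue state: BLK_STS_OK means
	 * send it, BLK_STS_RESOURCE means blk-mq requeues and retries it
	 * later, BLK_STS_IOERR means it is failed back immediately.
	 */
	ret = nvmf_check_if_ready(queue->ctrl, rq,
			test_bit(DEMO_Q_LIVE, &queue->flags),
			queue->connection_up);
	if (unlikely(ret))
		return ret;

	/* ... map data and post the command on the fabric ... */
	return BLK_STS_OK;
}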
@@ -608,8 +687,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			opts->discovery_nqn =
 				!(strcmp(opts->subsysnqn,
 					NVME_DISC_SUBSYS_NAME));
-			if (opts->discovery_nqn)
+			if (opts->discovery_nqn) {
+				opts->kato = 0;
 				opts->nr_io_queues = 0;
+			}
 			break;
 		case NVMF_OPT_TRADDR:
 			p = match_strdup(args);
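
With the second hunk, a connect string naming the well-known discovery NQN
(NVME_DISC_SUBSYS_NAME, "nqn.2014-08.org.nvmexpress.discovery") now gets
keep-alive disabled (kato = 0) in addition to having its I/O queue count
zeroed. A small userspace sketch of how such an options string reaches
nvmf_parse_options(), assuming the standard /dev/nvme-fabrics character
device and an RDMA address chosen purely for illustration:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* The nqn= token below is the well-known discovery NQN. */
	const char *opts = "transport=rdma,traddr=192.168.1.10,trsvcid=4420,"
			   "nqn=nqn.2014-08.org.nvmexpress.discovery";
	int fd = open("/dev/nvme-fabrics", O_RDWR);

	if (fd < 0)
		return 1;
	/*
	 * The kernel parses this string in nvmf_parse_options(); seeing
	 * the discovery NQN, it sets opts->discovery_nqn and now forces
	 * kato = 0 (no keep-alive) as well as nr_io_queues = 0.
	 */
	if (write(fd, opts, strlen(opts)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}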