author     Chad Dupuis <cdupuis@marvell.com>               2019-03-26 00:38:39 -0700
committer  Martin K. Petersen <martin.petersen@oracle.com> 2019-03-27 21:54:52 -0400
commit     96b1765a099b3b38d5a77796c45ee11a6ea6bf84 (patch)
tree       4dfd003412baf9c039d65debe0725f595965e927
parent     69ef2c692510d5154c613569caeeed3c74806231 (diff)
scsi: qedf: Use a separate completion for cleanup commands
- If a TMF and a cleanup are issued at the same time against the same xid, they can trigger a call trace, because the io_req->tm_done completion is used for both.
- Set and clear the cleanup bit in the cleanup routine.

Signed-off-by: Chad Dupuis <cdupuis@marvell.com>
Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
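The fix follows a common pattern: give each operation type that can be outstanding on the same request its own struct completion, so a waiter for one operation can never be woken by the completion of another. The sketch below is illustrative only and is not qedf driver code; the demo_ names, the DEMO_ constants, and the error handling are assumptions made for the example.

/*
 * Minimal sketch of the per-operation completion pattern (not qedf code).
 * A TMF waiter sleeps on tm_done, a cleanup waiter on cleanup_done, so a
 * completion for one can never wake a waiter for the other on the same
 * request. All demo_ and DEMO_ names are hypothetical.
 */
#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define DEMO_CMD_IN_CLEANUP	0	/* hypothetical flag bit */
#define DEMO_CLEANUP_TIMEOUT	10	/* seconds, illustrative value */

struct demo_ioreq {
	unsigned long flags;
	struct completion tm_done;	/* woken only by TMF completions */
	struct completion cleanup_done;	/* woken only by cleanup completions */
};

/* Issue path: mark the request, arm its own completion, post, then wait. */
static int demo_issue_cleanup(struct demo_ioreq *io_req)
{
	unsigned long tmo;

	set_bit(DEMO_CMD_IN_CLEANUP, &io_req->flags);
	init_completion(&io_req->cleanup_done);

	/* ... post the cleanup request to the hardware/firmware here ... */

	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
					  DEMO_CLEANUP_TIMEOUT * HZ);
	if (!tmo) {
		/* No answer in time; drop the in-cleanup marker. */
		clear_bit(DEMO_CMD_IN_CLEANUP, &io_req->flags);
		return -ETIMEDOUT;
	}
	return 0;
}

/* Completion path: clear the flag, then wake only the cleanup waiter. */
static void demo_process_cleanup_compl(struct demo_ioreq *io_req)
{
	clear_bit(DEMO_CMD_IN_CLEANUP, &io_req->flags);
	complete(&io_req->cleanup_done);
}

In the actual patch the same idea appears as the new cleanup_done field in struct qedf_ioreq and the switch from tm_done to cleanup_done in qedf_initiate_cleanup() and qedf_process_cleanup_compl(), shown in the diff below.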
 drivers/scsi/qedf/qedf.h    |  1 +
 drivers/scsi/qedf/qedf_io.c | 11 +++++++----
 2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 9e5e18316ddc..fb7d0d5ffa67 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -128,6 +128,7 @@ struct qedf_ioreq {
 	struct delayed_work timeout_work;
 	struct completion tm_done;
 	struct completion abts_done;
+	struct completion cleanup_done;
 	struct e4_fcoe_task_context *task;
 	struct fcoe_task_params *task_params;
 	struct scsi_sgl_task_params *sgl_task_params;
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index c22dbb3afe7d..58257ecfaf08 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -2065,10 +2065,13 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 		  io_req->xid);
 		return SUCCESS;
 	}
+	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
 	/* Ensure room on SQ */
 	if (!atomic_read(&fcport->free_sqes)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
+		/* Need to make sure we clear the flag since it was set */
+		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 		return FAILED;
 	}
 
@@ -2094,7 +2097,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 	task = qedf_get_task_mem(&qedf->tasks, xid);
 
-	init_completion(&io_req->tm_done);
+	init_completion(&io_req->cleanup_done);
 
 	spin_lock_irqsave(&fcport->rport_lock, flags);
@@ -2108,8 +2111,8 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
-	tmo = wait_for_completion_timeout(&io_req->tm_done,
-	    QEDF_CLEANUP_TIMEOUT * HZ);
+	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
+	    QEDF_CLEANUP_TIMEOUT * HZ);
 
 	if (!tmo) {
 		rc = FAILED;
@@ -2153,7 +2156,7 @@ void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
 
 	/* Complete so we can finish cleaning up the I/O */
-	complete(&io_req->tm_done);
+	complete(&io_req->cleanup_done);
 }
 
 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,