author     James Smart <jsmart2021@gmail.com>              2019-01-28 11:14:21 -0800
committer  Martin K. Petersen <martin.petersen@oracle.com> 2019-02-05 22:22:42 -0500
commit     cdb42becdd40eeb320af3f21ac9a34e9d7517516 (patch)
tree       e72414894b82213f59141f78b83fcbbdfdf7bea2 /drivers/scsi/lpfc/lpfc_nvmet.c
parent     7370d10ac99e8ebc5501c0fcdec482cb939ecbd4 (diff)
scsi: lpfc: Replace io_channels for nvme and fcp with general hdw_queues per cpu
Currently, both nvme and fcp each have their own concept of an io_channel, which is a combined wq/cq pair and an associated msix vector. Different cpus would share an io_channel.

The driver is now moving to per-cpu wq/cq pairs and msix vectors. The driver will still use separate wq/cq pairs per protocol on each cpu, but the protocols will share the msix vector.

Given the elimination of the nvme and fcp io channels, the module parameters for them will be removed. A new parameter, lpfc_hdw_queue, is added which allows the per-cpu wq/cq pair allocation to be overridden and set to a lesser value. If lpfc_hdw_queue is zero, the number of pairs allocated will be based on the number of cpus. If non-zero, the parameter specifies the number of queues to allocate. At this time, the maximum non-zero value is 64.

To manage this new paradigm, a new hardware queue structure is created to track queue activity and relationships. As MSIX vector allocation must be known before setting up the relationships, msix allocation now occurs before the queue data structures are allocated. If the number of vectors allocated is less than the desired hardware queues, the hardware queue count will be reduced to the number of vectors.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
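For readers unfamiliar with the new layout, the minimal sketch below illustrates the per-cpu pairing and the sizing rule described above. Only the hdwq array and its nvme_wq member appear in the hunks that follow; the structure name, the other fields, and the helper lpfc_pick_hdw_queue_cnt() are hypothetical, used here only to illustrate the idea.

/*
 * Illustrative sketch only -- not the driver's actual definitions.
 * One hardware queue entry per cpu; nvme and fcp keep separate wq/cq
 * pairs but share the entry's msix vector.
 */
struct lpfc_queue;                          /* opaque WQ/CQ type in lpfc */

struct lpfc_sli4_hdw_queue {                /* assumed name of the new struct */
	struct lpfc_queue *nvme_wq;         /* seen in the hunks below */
	struct lpfc_queue *nvme_cq;         /* assumed: paired completion queue */
	struct lpfc_queue *fcp_wq;          /* assumed: per-cpu FCP work queue */
	struct lpfc_queue *fcp_cq;          /* assumed: paired completion queue */
	unsigned int       irq_vector;      /* assumed: shared msix vector index */
};

/*
 * Sizing rule from the commit text: 0 => one queue per cpu; non-zero =>
 * use that count, capped at 64; always trimmed to the msix vectors granted.
 */
static unsigned int lpfc_pick_hdw_queue_cnt(unsigned int cfg_hdw_queue,
					    unsigned int ncpus,
					    unsigned int msix_vectors)
{
	unsigned int cnt = cfg_hdw_queue ? cfg_hdw_queue : ncpus;

	if (cfg_hdw_queue && cnt > 64)
		cnt = 64;
	if (cnt > msix_vectors)
		cnt = msix_vectors;
	return cnt;
}

/* Old lookup:  wq = phba->sli4_hba.nvme_wq[hwqid];       */
/* New lookup:  wq = phba->sli4_hba.hdwq[hwqid].nvme_wq;  */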
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvmet.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6245f442d784..c64a8234d5bd 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -973,7 +973,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
- wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
+ wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1047,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
- wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
+ wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
@@ -1377,7 +1377,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
* allocate + 3, one for cmd, one for rsp and one for this alignment
*/
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
+ lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
@@ -1697,8 +1697,8 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
return;
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
- wq = phba->sli4_hba.nvme_wq[qidx];
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
init_completion(&tgtp->tport_unreg_done);