Diffstat (limited to 'drivers')
-rw-r--r-- drivers/ata/libata-scsi.c | 30
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 2
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 2
-rw-r--r-- drivers/message/fusion/mptbase.c | 2
-rw-r--r-- drivers/message/fusion/mptsas.c | 119
-rw-r--r-- drivers/nvme/host/fc.c | 72
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 5
-rw-r--r-- drivers/scsi/3w-9xxx.c | 74
-rw-r--r-- drivers/scsi/3w-9xxx.h | 121
-rw-r--r-- drivers/scsi/3w-xxxx.c | 6
-rw-r--r-- drivers/scsi/53c700.c | 6
-rw-r--r-- drivers/scsi/FlashPoint.c | 197
-rw-r--r-- drivers/scsi/Kconfig | 18
-rw-r--r-- drivers/scsi/Makefile | 2
-rw-r--r-- drivers/scsi/NCR5380.c | 10
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 10
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r-- drivers/scsi/advansys.c | 4
-rw-r--r-- drivers/scsi/aha152x.c | 33
-rw-r--r-- drivers/scsi/aha1740.c | 7
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.c | 19
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_task.c | 2
-rw-r--r-- drivers/scsi/arcmsr/arcmsr.h | 2
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 43
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 46
-rw-r--r-- drivers/scsi/arm/fas216.c | 17
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.c | 25
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 7
-rw-r--r-- drivers/scsi/bfa/bfa_defs_svc.h | 2
-rw-r--r-- drivers/scsi/bfa/bfa_svc.c | 8
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 32
-rw-r--r-- drivers/scsi/ch.c | 5
-rw-r--r-- drivers/scsi/constants.c | 17
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 1
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 1
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 17
-rw-r--r-- drivers/scsi/cxlflash/superpipe.c | 3
-rw-r--r-- drivers/scsi/dc395x.c | 80
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 81
-rw-r--r-- drivers/scsi/elx/Kconfig | 9
-rw-r--r-- drivers/scsi/elx/Makefile | 18
-rw-r--r-- drivers/scsi/elx/efct/efct_driver.c | 786
-rw-r--r-- drivers/scsi/elx/efct/efct_driver.h | 109
-rw-r--r-- drivers/scsi/elx/efct/efct_hw.c | 3581
-rw-r--r-- drivers/scsi/elx/efct/efct_hw.h | 764
-rw-r--r-- drivers/scsi/elx/efct/efct_hw_queues.c | 677
-rw-r--r-- drivers/scsi/elx/efct/efct_io.c | 191
-rw-r--r-- drivers/scsi/elx/efct/efct_io.h | 174
-rw-r--r-- drivers/scsi/elx/efct/efct_lio.c | 1698
-rw-r--r-- drivers/scsi/elx/efct/efct_lio.h | 189
-rw-r--r-- drivers/scsi/elx/efct/efct_scsi.c | 1159
-rw-r--r-- drivers/scsi/elx/efct/efct_scsi.h | 203
-rw-r--r-- drivers/scsi/elx/efct/efct_unsol.c | 492
-rw-r--r-- drivers/scsi/elx/efct/efct_unsol.h | 17
-rw-r--r-- drivers/scsi/elx/efct/efct_xport.c | 1111
-rw-r--r-- drivers/scsi/elx/efct/efct_xport.h | 186
-rw-r--r-- drivers/scsi/elx/include/efc_common.h | 37
-rw-r--r-- drivers/scsi/elx/libefc/efc.h | 52
-rw-r--r-- drivers/scsi/elx/libefc/efc_cmds.c | 777
-rw-r--r-- drivers/scsi/elx/libefc/efc_cmds.h | 35
-rw-r--r-- drivers/scsi/elx/libefc/efc_device.c | 1603
-rw-r--r-- drivers/scsi/elx/libefc/efc_device.h | 72
-rw-r--r-- drivers/scsi/elx/libefc/efc_domain.c | 1088
-rw-r--r-- drivers/scsi/elx/libefc/efc_domain.h | 54
-rw-r--r-- drivers/scsi/elx/libefc/efc_els.c | 1098
-rw-r--r-- drivers/scsi/elx/libefc/efc_els.h | 107
-rw-r--r-- drivers/scsi/elx/libefc/efc_fabric.c | 1564
-rw-r--r-- drivers/scsi/elx/libefc/efc_fabric.h | 116
-rw-r--r-- drivers/scsi/elx/libefc/efc_node.c | 1102
-rw-r--r-- drivers/scsi/elx/libefc/efc_node.h | 191
-rw-r--r-- drivers/scsi/elx/libefc/efc_nport.c | 777
-rw-r--r-- drivers/scsi/elx/libefc/efc_nport.h | 50
-rw-r--r-- drivers/scsi/elx/libefc/efc_sm.c | 54
-rw-r--r-- drivers/scsi/elx/libefc/efc_sm.h | 197
-rw-r--r-- drivers/scsi/elx/libefc/efclib.c | 81
-rw-r--r-- drivers/scsi/elx/libefc/efclib.h | 620
-rw-r--r-- drivers/scsi/elx/libefc_sli/sli4.c | 5162
-rw-r--r-- drivers/scsi/elx/libefc_sli/sli4.h | 4132
-rw-r--r-- drivers/scsi/esas2r/atioctl.h | 2
-rw-r--r-- drivers/scsi/esas2r/esas2r_main.c | 2
-rw-r--r-- drivers/scsi/esp_scsi.c | 4
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 6
-rw-r--r-- drivers/scsi/fdomain.c | 22
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 7
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 99
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 20
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 10
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 10
-rw-r--r-- drivers/scsi/hosts.c | 13
-rw-r--r-- drivers/scsi/hptiop.c | 2
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.c | 60
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.h | 3
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r-- drivers/scsi/imm.c | 15
-rw-r--r-- drivers/scsi/ips.c | 10
-rw-r--r-- drivers/scsi/isci/request.c | 10
-rw-r--r-- drivers/scsi/isci/task.c | 6
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 7
-rw-r--r-- drivers/scsi/libfc/fc_encode.h | 256
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 88
-rw-r--r-- drivers/scsi/libiscsi.c | 234
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 7
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_task.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 124
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 59
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 12
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 298
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 11
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 665
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 229
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 124
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 12
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 109
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 9
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 40
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 416
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 66
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 11
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/megaraid.c | 20
-rw-r--r-- drivers/scsi/megaraid/megaraid_mbox.c | 27
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 16
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 102
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 6
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 21
-rw-r--r-- drivers/scsi/mesh.c | 9
-rw-r--r-- drivers/scsi/mpi3mr/Kconfig | 7
-rw-r--r-- drivers/scsi/mpi3mr/Makefile | 4
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 1880
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_image.h | 216
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_init.h | 159
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 1004
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_sas.h | 33
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 463
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr.h | 901
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_debug.h | 60
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 3958
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 4045
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 349
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_config.c | 18
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 193
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 10
-rw-r--r-- drivers/scsi/mvumi.c | 10
-rw-r--r-- drivers/scsi/myrb.c | 64
-rw-r--r-- drivers/scsi/myrs.c | 9
-rw-r--r-- drivers/scsi/nsp32.c | 419
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 16
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 10
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 14
-rw-r--r-- drivers/scsi/pmcraid.h | 4
-rw-r--r-- drivers/scsi/ppa.c | 14
-rw-r--r-- drivers/scsi/ps3rom.c | 7
-rw-r--r-- drivers/scsi/qedf/qedf_dbg.c | 3
-rw-r--r-- drivers/scsi/qedf/qedf_io.c | 5
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 9
-rw-r--r-- drivers/scsi/qedi/qedi.h | 1
-rw-r--r-- drivers/scsi/qedi/qedi_fw.c | 291
-rw-r--r-- drivers/scsi/qedi/qedi_gbl.h | 4
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 105
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.h | 5
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 15
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_83xx.c | 3
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 4
-rw-r--r-- drivers/scsi/qlogicfas408.c | 138
-rw-r--r-- drivers/scsi/scsi.c | 11
-rw-r--r-- drivers/scsi/scsi_debug.c | 20
-rw-r--r-- drivers/scsi/scsi_error.c | 70
-rw-r--r-- drivers/scsi/scsi_ioctl.c | 7
-rw-r--r-- drivers/scsi/scsi_lib.c | 119
-rw-r--r-- drivers/scsi/scsi_logging.c | 10
-rw-r--r-- drivers/scsi/scsi_scan.c | 6
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 499
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 9
-rw-r--r-- drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r-- drivers/scsi/sd.c | 63
-rw-r--r-- drivers/scsi/sd_zbc.c | 3
-rw-r--r-- drivers/scsi/sg.c | 9
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 3
-rw-r--r-- drivers/scsi/snic/snic_ctl.c | 5
-rw-r--r-- drivers/scsi/sr.c | 4
-rw-r--r-- drivers/scsi/sr_ioctl.c | 6
-rw-r--r-- drivers/scsi/st.c | 8
-rw-r--r-- drivers/scsi/stex.c | 9
-rw-r--r-- drivers/scsi/storvsc_drv.c | 119
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_glue.c | 6
-rw-r--r-- drivers/scsi/ufs/Kconfig | 1
-rw-r--r-- drivers/scsi/ufs/cdns-pltfrm.c | 2
-rw-r--r-- drivers/scsi/ufs/tc-dwc-g210-pci.c | 2
-rw-r--r-- drivers/scsi/ufs/ufs-debugfs.c | 6
-rw-r--r-- drivers/scsi/ufs/ufs-debugfs.h | 2
-rw-r--r-- drivers/scsi/ufs/ufs-exynos.c | 31
-rw-r--r-- drivers/scsi/ufs/ufs-exynos.h | 26
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.c | 4
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.c | 45
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 2
-rw-r--r-- drivers/scsi/ufs/ufs-sysfs.c | 269
-rw-r--r-- drivers/scsi/ufs/ufs_bsg.c | 6
-rw-r--r-- drivers/scsi/ufs/ufshcd-pci.c | 36
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 1163
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 82
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 1
-rw-r--r-- drivers/scsi/virtio_scsi.c | 5
-rw-r--r-- drivers/scsi/vmw_pvscsi.c | 6
-rw-r--r-- drivers/scsi/wd33c93.c | 43
-rw-r--r-- drivers/scsi/xen-scsifront.c | 8
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 19
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_target.c | 21
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl1.c | 2
-rw-r--r-- drivers/target/iscsi/iscsi_target_nego.c | 11
-rw-r--r-- drivers/target/iscsi/iscsi_target_parameters.c | 4
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 1
-rw-r--r-- drivers/target/sbp/sbp_target.c | 1
-rw-r--r-- drivers/target/target_core_alua.c | 6
-rw-r--r-- drivers/target/target_core_configfs.c | 50
-rw-r--r-- drivers/target/target_core_device.c | 5
-rw-r--r-- drivers/target/target_core_iblock.c | 2
-rw-r--r-- drivers/target/target_core_pr.c | 8
-rw-r--r-- drivers/target/target_core_pr.h | 2
-rw-r--r-- drivers/target/target_core_pscsi.c | 2
-rw-r--r-- drivers/target/target_core_sbc.c | 10
-rw-r--r-- drivers/target/target_core_spc.c | 97
-rw-r--r-- drivers/target/target_core_user.c | 10
-rw-r--r-- drivers/target/target_core_xcopy.c | 19
-rw-r--r-- drivers/usb/storage/cypress_atacb.c | 4
-rw-r--r-- drivers/xen/xen-scsiback.c | 17
237 files changed, 49461 insertions(+), 2809 deletions(-)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index fd8b6febbf70..b9588c52815d 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -196,9 +196,7 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
if (!cmd)
return;
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
- scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
+ scsi_build_sense(cmd, d_sense, sk, asc, ascq);
}
void ata_scsi_set_sense_information(struct ata_device *dev,
@@ -409,13 +407,16 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
- if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+ if (cmd_result < 0) {
+ rc = cmd_result;
+ goto error;
+ }
+ if (scsi_sense_valid(&sshdr)) {/* sense data available */
u8 *desc = sensebuf + 8;
- cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
- if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+ if (scsi_status_is_check_condition(cmd_result)) {
if (sshdr.sense_key == RECOVERED_ERROR &&
sshdr.asc == 0 && sshdr.ascq == 0x1d)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
@@ -490,9 +491,12 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
- if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+ if (cmd_result < 0) {
+ rc = cmd_result;
+ goto error;
+ }
+ if (scsi_sense_valid(&sshdr)) {/* sense data available */
u8 *desc = sensebuf + 8;
- cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
/* If we set cc then ATA pass-through will cause a
* check condition even if no error. Filter that. */
@@ -638,7 +642,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
if (cmd->request->rq_flags & RQF_QUIET)
qc->flags |= ATA_QCFLAG_QUIET;
} else {
- cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
+ cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
cmd->scsi_done(cmd);
}
@@ -858,8 +862,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
/*
* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
@@ -874,8 +876,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
* ATA PASS-THROUGH INFORMATION AVAILABLE
* Always in descriptor format sense.
*/
- scsi_build_sense_buffer(1, cmd->sense_buffer,
- RECOVERED_ERROR, 0, 0x1D);
+ scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
}
if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
@@ -957,8 +958,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
if (ata_dev_disabled(dev)) {
/* Device disabled after error recovery */
/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
@@ -4196,7 +4195,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
case REQUEST_SENSE:
ata_scsi_set_sense(dev, cmd, 0, 0, 0);
- cmd->result = (DRIVER_SENSE << 24);
break;
/* if we reach this, then writeback caching is disabled,
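The libata conversion above replaces the open-coded DRIVER_SENSE/CHECK_CONDITION pattern with the scsi_build_sense() helper. A minimal sketch of what such a helper is assumed to do (not the exact midlayer implementation) is:

	/* Sketch only: fold the old two-step pattern into a single call. */
	static void scsi_build_sense_sketch(struct scsi_cmnd *scmd, int desc,
					    u8 key, u8 asc, u8 ascq)
	{
		/* fill scmd->sense_buffer in fixed or descriptor format */
		scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
		/* report CHECK CONDITION without the removed DRIVER_SENSE bit */
		scmd->result = SAM_STAT_CHECK_CONDITION;
	}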
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8fcaa1136f2c..776e46ee95da 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -506,6 +506,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
iser_conn->iscsi_conn = conn;
out:
+ iscsi_put_endpoint(ep);
mutex_unlock(&iser_conn->state_mutex);
return error;
}
@@ -1002,6 +1003,7 @@ static struct iscsi_transport iscsi_iser_transport = {
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 6ba48a09eac4..8d5cf5eb5778 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2209,7 +2209,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
* to reduce queue depth temporarily.
*/
scmnd->result = len == -ENOMEM ?
- DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
+ DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
goto err_iu;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index f4f89cf23631..7f7abc9069f7 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -6993,8 +6993,6 @@ mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
ioc->ioc_reset_in_progress = 1;
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
- rc = -1;
-
for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
if (MptResetHandlers[cb_idx])
mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index e0a65a348502..85285ba8e817 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -86,7 +86,7 @@ MODULE_PARM_DESC(mpt_pt_clear,
" Clear persistency table: enable=1 "
"(default=MPTSCSIH_PT_CLEAR=0)");
-/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPTSAS_MAX_LUN (16895)
static int max_lun = MPTSAS_MAX_LUN;
module_param(max_lun, int, 0);
@@ -420,12 +420,14 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
}
/**
- * mptsas_find_portinfo_by_sas_address -
+ * mptsas_find_portinfo_by_sas_address - find and return portinfo for
+ * this sas_address
* @ioc: Pointer to MPT_ADAPTER structure
- * @handle:
+ * @sas_address: expander sas address
*
- * This function should be called with the sas_topology_mutex already held
+ * This function should be called with the sas_topology_mutex already held.
*
+ * Return: %NULL if not found.
**/
static struct mptsas_portinfo *
mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
@@ -567,12 +569,14 @@ starget)
}
/**
- * mptsas_add_device_component -
+ * mptsas_add_device_component - adds a new device component to our lists
* @ioc: Pointer to MPT_ADAPTER structure
- * @channel: fw mapped id's
- * @id:
- * @sas_address:
- * @device_info:
+ * @channel: channel number
+ * @id: Logical Target ID for reset (if appropriate)
+ * @sas_address: expander sas address
+ * @device_info: specific bits (flags) for devices
+ * @slot: enclosure slot ID
+ * @enclosure_logical_id: enclosure WWN
*
**/
static void
@@ -634,10 +638,10 @@ mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
}
/**
- * mptsas_add_device_component_by_fw -
+ * mptsas_add_device_component_by_fw - adds a new device component by FW ID
* @ioc: Pointer to MPT_ADAPTER structure
- * @channel: fw mapped id's
- * @id:
+ * @channel: channel number
+ * @id: Logical Target ID
*
**/
static void
@@ -668,8 +672,7 @@ mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
/**
* mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
* @ioc: Pointer to MPT_ADAPTER structure
- * @channel: fw mapped id's
- * @id:
+ * @starget: SCSI target for this SCSI device
*
**/
static void
@@ -771,9 +774,9 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
}
/**
- * mptsas_add_device_component_starget -
+ * mptsas_add_device_component_starget - adds a SCSI target device component
* @ioc: Pointer to MPT_ADAPTER structure
- * @starget:
+ * @starget: SCSI target for this SCSI device
*
**/
static void
@@ -806,7 +809,7 @@ mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
* mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
* @ioc: Pointer to MPT_ADAPTER structure
* @channel: os mapped id's
- * @id:
+ * @id: Logical Target ID
*
**/
static void
@@ -978,11 +981,12 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
}
/**
- * csmisas_find_vtarget
+ * mptsas_find_vtarget - find a virtual target device (FC LUN device or
+ * SCSI target device)
*
- * @ioc
- * @volume_id
- * @volume_bus
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: channel number
+ * @id: Logical Target ID
*
**/
static VirtTarget *
@@ -1047,15 +1051,14 @@ mptsas_queue_rescan(MPT_ADAPTER *ioc)
/**
- * mptsas_target_reset
- *
- * Issues TARGET_RESET to end device using handshaking method
+ * mptsas_target_reset - Issues TARGET_RESET to end device using
+ * handshaking method
*
- * @ioc
- * @channel
- * @id
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: channel number
+ * @id: Logical Target ID for reset
*
- * Returns (1) success
+ * Return: (1) success
* (0) failure
*
**/
@@ -1119,15 +1122,15 @@ mptsas_block_io_starget(struct scsi_target *starget)
}
/**
- * mptsas_target_reset_queue
+ * mptsas_target_reset_queue - queue a target reset
*
- * Receive request for TARGET_RESET after receiving an firmware
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sas_event_data: SAS Device Status Change Event data
+ *
+ * Receive request for TARGET_RESET after receiving a firmware
* event NOT_RESPONDING_EVENT, then put command in link list
* and queue if task_queue already in use.
*
- * @ioc
- * @sas_event_data
- *
**/
static void
mptsas_target_reset_queue(MPT_ADAPTER *ioc,
@@ -1207,9 +1210,11 @@ mptsas_schedule_target_reset(void *iocp)
/**
* mptsas_taskmgmt_complete - complete SAS task management function
* @ioc: Pointer to MPT_ADAPTER structure
+ * @mf: MPT message frame
+ * @mr: SCSI Task Management Reply structure ptr (may be %NULL)
*
* Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
- * queue to finish off removing device from upper layers. then send next
+ * queue to finish off removing device from upper layers, then send next
* TARGET_RESET in the queue.
**/
static int
@@ -1300,10 +1305,10 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
}
/**
- * mptscsih_ioc_reset
+ * mptsas_ioc_reset - issue an IOC reset for this reset phase
*
- * @ioc
- * @reset_phase
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @reset_phase: id of phase of reset
*
**/
static int
@@ -1350,7 +1355,7 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
/**
- * enum device_state -
+ * enum device_state - TUR device state
* @DEVICE_RETRY: need to retry the TUR
* @DEVICE_ERROR: TUR return error, don't add device
* @DEVICE_READY: device can be added
@@ -1941,7 +1946,7 @@ mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
}
/**
- * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
+ * mptsas_eh_timed_out - resets the scsi_cmnd timeout
* if the device under question is currently in the
* device removal delay.
* @sc: scsi command that the midlayer is about to time out
@@ -2839,14 +2844,15 @@ struct rep_manu_reply{
};
/**
- * mptsas_exp_repmanufacture_info -
+ * mptsas_exp_repmanufacture_info - sets expander manufacturer info
* @ioc: per adapter object
* @sas_address: expander sas address
* @edev: the sas_expander_device object
*
- * Fills in the sas_expander_device object when SMP port is created.
+ * For an edge expander or a fanout expander:
+ * fills in the sas_expander_device object when SMP port is created.
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
static int
mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
@@ -3284,7 +3290,7 @@ static int mptsas_probe_one_phy(struct device *dev,
rphy_to_expander_device(rphy));
}
- /* If the device exists,verify it wasn't previously flagged
+ /* If the device exists, verify it wasn't previously flagged
as a missing device. If so, clear it */
vtarget = mptsas_find_vtarget(ioc,
phy_info->attached.channel,
@@ -3611,8 +3617,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
/**
* mptsas_send_expander_event - expanders events
- * @ioc: Pointer to MPT_ADAPTER structure
- * @expander_data: event data
+ * @fw_event: event data
*
*
* This function handles adding, removing, and refreshing
@@ -3657,9 +3662,9 @@ mptsas_send_expander_event(struct fw_event_work *fw_event)
/**
- * mptsas_expander_add -
+ * mptsas_expander_add - adds a newly discovered expander
* @ioc: Pointer to MPT_ADAPTER structure
- * @handle:
+ * @handle: device handle
*
*/
static struct mptsas_portinfo *
@@ -4000,9 +4005,9 @@ mptsas_probe_devices(MPT_ADAPTER *ioc)
}
/**
- * mptsas_scan_sas_topology -
+ * mptsas_scan_sas_topology - scans new SAS topology
+ * (part of probe or rescan)
* @ioc: Pointer to MPT_ADAPTER structure
- * @sas_address:
*
**/
static void
@@ -4150,11 +4155,12 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
}
/**
- * mptsas_find_phyinfo_by_phys_disk_num -
+ * mptsas_find_phyinfo_by_phys_disk_num - find phyinfo for the
+ * specified @phys_disk_num
* @ioc: Pointer to MPT_ADAPTER structure
- * @phys_disk_num:
- * @channel:
- * @id:
+ * @phys_disk_num: (hot plug) physical disk number (for RAID support)
+ * @channel: channel number
+ * @id: Logical Target ID
*
**/
static struct mptsas_phyinfo *
@@ -4773,8 +4779,9 @@ mptsas_send_raid_event(struct fw_event_work *fw_event)
* @lun: Logical unit for reset (if appropriate)
* @task_context: Context for the task to be aborted
* @timeout: timeout for task management control
+ * @issue_reset: set to 1 on return if reset is needed, else 0
*
- * return 0 on success and -1 on failure:
+ * Return: 0 on success or -1 on failure.
*
*/
static int
@@ -4847,9 +4854,9 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
/**
* mptsas_broadcast_primitive_work - Handle broadcast primitives
- * @work: work queue payload containing info describing the event
+ * @fw_event: work queue payload containing info describing the event
*
- * this will be handled in workqueue context.
+ * This will be handled in workqueue context.
*/
static void
mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7600863f7752..95aad3fed571 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -9,7 +9,7 @@
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
-
+#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
@@ -3808,10 +3808,80 @@ process_local_list:
return count;
}
+
+/* Parse the cgroup id from a buf and return the length of cgrpid */
+static int fc_parse_cgrpid(const char *buf, u64 *id)
+{
+ char cgrp_id[16+1];
+ int cgrpid_len, j;
+
+ memset(cgrp_id, 0x0, sizeof(cgrp_id));
+ for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
+ if (buf[cgrpid_len] != ':')
+ cgrp_id[cgrpid_len] = buf[cgrpid_len];
+ else {
+ j = 1;
+ break;
+ }
+ }
+ if (!j)
+ return -EINVAL;
+ if (kstrtou64(cgrp_id, 16, id) < 0)
+ return -EINVAL;
+ return cgrpid_len;
+}
+
+/*
+ * fc_update_appid: Parse and update the appid in the blkcg associated with
+ * cgroupid.
+ * @buf: buf contains both cgrpid and appid info
+ * @count: size of the buffer
+ */
+static int fc_update_appid(const char *buf, size_t count)
+{
+ u64 cgrp_id;
+ int appid_len = 0;
+ int cgrpid_len = 0;
+ char app_id[FC_APPID_LEN];
+ int ret = 0;
+
+ if (buf[count-1] == '\n')
+ count--;
+
+ if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
+ return -EINVAL;
+
+ cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
+ if (cgrpid_len < 0)
+ return -EINVAL;
+ appid_len = count - cgrpid_len - 1;
+ if (appid_len > FC_APPID_LEN)
+ return -EINVAL;
+
+ memset(app_id, 0x0, sizeof(app_id));
+ memcpy(app_id, &buf[cgrpid_len+1], appid_len);
+ ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static ssize_t fc_appid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+
+ ret = fc_update_appid(buf, count);
+ if (ret < 0)
+ return -EINVAL;
+ return count;
+}
static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
static struct attribute *nvme_fc_attrs[] = {
&dev_attr_nvme_discovery.attr,
+ &dev_attr_appid_store.attr,
NULL
};
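The new appid_store attribute takes a value of the form "<cgroupid>:<appid>", where the cgroup ID is at most 16 hex digits and the application ID is limited to FC_APPID_LEN bytes. A self-contained userspace sketch of the same split-and-parse rules (hypothetical helper name, not the kernel code):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Illustration of the "<hex cgrpid>:<appid>" parsing done above. */
	static int parse_cgrpid_appid(const char *buf, unsigned long long *cgrpid,
				      char *appid, size_t appid_sz)
	{
		const char *colon = strchr(buf, ':');
		char *end;

		if (!colon || colon == buf || colon - buf > 16)
			return -1;		/* need 1..16 hex digits */
		*cgrpid = strtoull(buf, &end, 16);
		if (end != colon)
			return -1;		/* non-hex characters before ':' */
		if (strlen(colon + 1) + 1 > appid_sz)
			return -1;		/* appid too long */
		strcpy(appid, colon + 1);
		return 0;
	}

	int main(void)
	{
		unsigned long long id;
		char app[32];

		if (!parse_cgrpid_appid("1a2b3c:web01", &id, app, sizeof(app)))
			printf("cgrpid=%llu appid=%s\n", id, app);
		return 0;
	}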
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index d58bf79892f2..9da9b2b2a580 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -856,10 +856,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
*/
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
- scsi_build_sense_buffer(1, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x10, ascq);
- set_driver_byte(scmd, DRIVER_SENSE);
- scmd->result |= SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 1, ILLEGAL_REQUEST, 0x10, ascq);
set_host_byte(scmd, DID_SOFT_ERROR);
}
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 47028f5e57ab..e41cc354cc8a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -303,10 +303,10 @@ static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
/* Initialize sglist */
memset(&sglist, 0, sizeof(TW_SG_Entry));
- sglist[0].length = TW_SECTOR_SIZE;
- sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+ sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+ sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
- if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
+ if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
goto out;
}
@@ -440,8 +440,8 @@ static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
/* Initialize sglist */
memset(&sglist, 0, sizeof(TW_SG_Entry));
- sglist[0].length = TW_SECTOR_SIZE;
- sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+ sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+ sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
/* Mark internal command */
tw_dev->srb[request_id] = NULL;
@@ -501,9 +501,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
Sunday 12:00AM */
local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
- schedulertime = cpu_to_le32(schedulertime % 604800);
- memcpy(param->data, &schedulertime, sizeof(u32));
+ memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));
/* Mark internal command */
tw_dev->srb[request_id] = NULL;
@@ -676,7 +675,9 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+ &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
retval = TW_IOCTL_ERROR_OS_ENOMEM;
goto out2;
@@ -685,7 +686,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
goto out3;
/* See which ioctl we are doing */
@@ -867,11 +868,13 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
}
/* Now copy the entire response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+ cpu_addr, dma_handle);
out2:
mutex_unlock(&tw_dev->ioctl_lock);
out:
@@ -1000,19 +1003,13 @@ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_
if (print_host)
printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
tw_dev->host->host_no,
- TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
- full_command_packet->header.status_block.error,
- error_str[0] == '\0' ?
- twa_string_lookup(twa_error_table,
- full_command_packet->header.status_block.error) : error_str,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+ error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
full_command_packet->header.err_specific_desc);
else
printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
- TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
- full_command_packet->header.status_block.error,
- error_str[0] == '\0' ?
- twa_string_lookup(twa_error_table,
- full_command_packet->header.status_block.error) : error_str,
+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+ error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
full_command_packet->header.err_specific_desc);
}
@@ -1129,12 +1126,11 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
tw_initconnect->request_id = request_id;
tw_initconnect->message_credits = cpu_to_le16(message_credits);
- tw_initconnect->features = set_features;
/* Turn on 64-bit sgl support if we need to */
- tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
+ set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
- tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
+ tw_initconnect->features = cpu_to_le32(set_features);
if (set_features & TW_EXTENDED_INIT_CONNECT) {
tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
@@ -1342,13 +1338,15 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
}
/* Report residual bytes for single sgl */
if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
- if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
- scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
+ u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);
+
+ if (length < scsi_bufflen(cmd))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
}
/* Now complete the io */
@@ -1390,13 +1388,13 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
newcommand = &full_command_packet->command.newcommand;
newcommand->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+ TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
if (length) {
- newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
newcommand->sg_list[0].length = cpu_to_le32(length);
}
newcommand->sgl_entries__lunh =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+ TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
} else {
oldcommand = &full_command_packet->command.oldcommand;
oldcommand->request_id = request_id;
@@ -1407,7 +1405,7 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
else
sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
- sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
sgl->length = cpu_to_le32(length);
oldcommand->size += pae;
@@ -1831,10 +1829,10 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (srb) {
command_packet->unit = srb->device->id;
command_packet->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+ TW_REQ_LUN_IN(srb->device->lun, request_id);
} else {
command_packet->request_id__lunl =
- cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+ TW_REQ_LUN_IN(0, request_id);
command_packet->unit = 0;
}
@@ -1866,19 +1864,19 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
}
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+ command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
}
} else {
/* Internal cdb post */
for (i = 0; i < use_sg; i++) {
- command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
- command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
+ command_packet->sg_list[i].address = sglistarg[i].address;
+ command_packet->sg_list[i].length = sglistarg[i].length;
if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
goto out;
}
}
- command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+ command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
}
if (srb) {
@@ -2103,7 +2101,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
(char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
- le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
+ le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
/* Try to enable MSI */
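The ioctl sizing changes above (dropping the "- 1" from the dma_alloc_coherent(), copy_from_user() and copy_to_user() lengths) pair with 3w-9xxx.h turning data_buffer[1] into a C99 flexible array member. A generic illustration of the two sizing styles, using a simplified struct rather than the driver's layout:

	#include <stdlib.h>

	struct old_buf { int hdr; char data[1]; };	/* placeholder byte counted in sizeof */
	struct new_buf { int hdr; char data[]; };	/* flexible array member, not counted */

	static void size_demo(size_t payload)
	{
		void *a = malloc(sizeof(struct old_buf) - 1 + payload);	/* old: correct for data[1] */
		void *b = malloc(sizeof(struct new_buf) + payload);	/* new: header plus payload */

		free(a);
		free(b);
	}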
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d3f479324527..0b23b0422e88 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -50,7 +50,7 @@
/* AEN string type */
typedef struct TAG_twa_message_type {
unsigned int code;
- char* text;
+ char *text;
} twa_message_type;
/* AEN strings */
@@ -435,8 +435,8 @@ static twa_message_type twa_error_table[] = {
/* request_id: 12, lun: 4 */
#define TW_REQ_LUN_IN(lun, request_id) \
- (((lun << 12) & 0xf000) | (request_id & 0xfff))
-#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
+ cpu_to_le16(((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_LUN_OUT(lun) ((le16_to_cpu(lun) >> 12) & 0xf)
/* Macros */
#define TW_CONTROL_REG_ADDR(x) (x->base_addr)
@@ -483,70 +483,75 @@ printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
#define TW_APACHE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 72 : 109)
#define TW_ESCALADE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 41 : 62)
#define TW_PADDING_LENGTH (sizeof(dma_addr_t) > 4 ? 8 : 0)
-#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
-#pragma pack(1)
+#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+typedef __le64 twa_addr_t;
+#define TW_CPU_TO_SGL(x) cpu_to_le64(x)
+#else
+typedef __le32 twa_addr_t;
+#define TW_CPU_TO_SGL(x) cpu_to_le32(x)
+#endif
/* Scatter Gather List Entry */
typedef struct TAG_TW_SG_Entry {
- dma_addr_t address;
- u32 length;
-} TW_SG_Entry;
+ twa_addr_t address;
+ __le32 length;
+} __packed TW_SG_Entry;
/* Command Packet */
typedef struct TW_Command {
- unsigned char opcode__sgloffset;
- unsigned char size;
- unsigned char request_id;
- unsigned char unit__hostid;
+ u8 opcode__sgloffset;
+ u8 size;
+ u8 request_id;
+ u8 unit__hostid;
/* Second DWORD */
- unsigned char status;
- unsigned char flags;
+ u8 status;
+ u8 flags;
union {
- unsigned short block_count;
- unsigned short parameter_count;
+ __le16 block_count;
+ __le16 parameter_count;
} byte6_offset;
union {
struct {
- u32 lba;
- TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
- dma_addr_t padding;
+ __le32 lba;
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ twa_addr_t padding;
} io;
struct {
- TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
- u32 padding;
- dma_addr_t padding2;
+ TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+ __le32 padding;
+ twa_addr_t padding2;
} param;
} byte8_offset;
} TW_Command;
/* Command Packet for 9000+ controllers */
typedef struct TAG_TW_Command_Apache {
- unsigned char opcode__reserved;
- unsigned char unit;
- unsigned short request_id__lunl;
- unsigned char status;
- unsigned char sgl_offset;
- unsigned short sgl_entries__lunh;
- unsigned char cdb[16];
- TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
- unsigned char padding[TW_PADDING_LENGTH];
+ u8 opcode__reserved;
+ u8 unit;
+ __le16 request_id__lunl;
+ u8 status;
+ u8 sgl_offset;
+ __le16 sgl_entries__lunh;
+ u8 cdb[16];
+ TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
+ u8 padding[TW_PADDING_LENGTH];
} TW_Command_Apache;
/* New command packet header */
typedef struct TAG_TW_Command_Apache_Header {
unsigned char sense_data[TW_SENSE_DATA_LENGTH];
struct {
- char reserved[4];
- unsigned short error;
- unsigned char padding;
- unsigned char severity__reserved;
+ u8 reserved[4];
+ __le16 error;
+ u8 padding;
+ u8 severity__reserved;
} status_block;
unsigned char err_specific_desc[98];
struct {
- unsigned char size_header;
- unsigned short reserved;
- unsigned char size_sense;
+ u8 size_header;
+ u8 reserved[2];
+ u8 size_sense;
} header_desc;
} TW_Command_Apache_Header;
@@ -561,19 +566,19 @@ typedef struct TAG_TW_Command_Full {
/* Initconnection structure */
typedef struct TAG_TW_Initconnect {
- unsigned char opcode__reserved;
- unsigned char size;
- unsigned char request_id;
- unsigned char res2;
- unsigned char status;
- unsigned char flags;
- unsigned short message_credits;
- u32 features;
- unsigned short fw_srl;
- unsigned short fw_arch_id;
- unsigned short fw_branch;
- unsigned short fw_build;
- u32 result;
+ u8 opcode__reserved;
+ u8 size;
+ u8 request_id;
+ u8 res2;
+ u8 status;
+ u8 flags;
+ __le16 message_credits;
+ __le32 features;
+ __le16 fw_srl;
+ __le16 fw_arch_id;
+ __le16 fw_branch;
+ __le16 fw_build;
+ __le32 result;
} TW_Initconnect;
/* Event info structure */
@@ -602,7 +607,7 @@ typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
char padding[488];
TW_Command_Full firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_Ioctl_Buf_Apache;
/* Lock structure for ioctl get/release lock */
@@ -614,11 +619,11 @@ typedef struct TAG_TW_Lock {
/* GetParam descriptor */
typedef struct {
- unsigned short table_id;
- unsigned short parameter_id;
- unsigned short parameter_size_bytes;
- unsigned short actual_parameter_size_bytes;
- unsigned char data[1];
+ __le16 table_id;
+ __le16 parameter_id;
+ __le16 parameter_size_bytes;
+ __le16 actual_parameter_size_bytes;
+ u8 data[];
} TW_Param_Apache, *PTW_Param_Apache;
/* Response queue */
@@ -645,8 +650,6 @@ typedef struct TAG_TW_Compatibility_Info
unsigned short fw_on_ctlr_build;
} TW_Compatibility_Info;
-#pragma pack()
-
typedef struct TAG_TW_Device_Extension {
u32 __iomem *base_addr;
unsigned long *generic_buffer_virt[TW_Q_LENGTH];
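With 3w-9xxx.h now using explicit __le16/__le32 fields, the byte swapping moves into TW_REQ_LUN_IN()/TW_LUN_OUT() themselves (see the macro change above), which is why the callers in 3w-9xxx.c drop their cpu_to_le16() wrappers. The bit layout is unchanged: a 4-bit LUN in bits 15:12 and a 12-bit request ID in bits 11:0. A small host-endian illustration of that packing:

	#include <assert.h>
	#include <stdint.h>

	/* Same field layout as TW_REQ_LUN_IN/TW_LUN_OUT, minus the le16 conversion. */
	static uint16_t pack_lun_reqid(uint16_t lun, uint16_t request_id)
	{
		return ((lun << 12) & 0xf000) | (request_id & 0x0fff);
	}

	static uint16_t unpack_lun(uint16_t v)
	{
		return (v >> 12) & 0xf;
	}

	int main(void)
	{
		uint16_t v = pack_lun_reqid(5, 0x123);

		assert(unpack_lun(v) == 5);
		assert((v & 0x0fff) == 0x123);
		return 0;
	}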
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a7292883b72b..4ee485ab2714 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -429,7 +429,7 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
/* Additional sense code qualifier */
tw_dev->srb[request_id]->sense_buffer[13] = tw_sense_table[i][3];
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
return TW_ISR_DONT_RESULT; /* Special case for isr to not over-write result */
}
}
@@ -1977,7 +1977,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
- scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
+ scsi_build_sense(SCpnt, 1, ILLEGAL_REQUEST, 0x20, 0);
done(SCpnt);
retval = 0;
}
@@ -2159,7 +2159,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
/* If error, command failed */
if (error == 1) {
/* Ask for a host reset */
- tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+ tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
}
/* Now complete the io */
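The result-code rewrites in this series are value-preserving: the legacy constants in the old <scsi/scsi.h> were defined pre-shifted (CHECK_CONDITION as 0x01, QUEUE_FULL as 0x14), so shifting them left by one yields exactly the SAM status bytes now used directly. A compile-time check of that equivalence, with the legacy values restated here as assumptions:

	/* Legacy, right-shifted encodings (assumed from the old <scsi/scsi.h>). */
	#define OLD_CHECK_CONDITION	0x01
	#define OLD_QUEUE_FULL		0x14

	/* SAM-defined status bytes used after this series. */
	#define SAM_STAT_CHECK_CONDITION	0x02
	#define SAM_STAT_TASK_SET_FULL		0x28

	_Static_assert((OLD_CHECK_CONDITION << 1) == SAM_STAT_CHECK_CONDITION,
		       "CHECK_CONDITION << 1 is the SAM status byte");
	_Static_assert((OLD_QUEUE_FULL << 1) == SAM_STAT_TASK_SET_FULL,
		       "QUEUE_FULL << 1 is the SAM status byte");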
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 77ccb96e5ed4..1c6b4e672687 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -978,10 +978,10 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
NCR_700_set_tag_neg_state(SCp->device,
NCR_700_FINISHED_TAG_NEGOTIATION);
-
+
/* check for contingent allegiance conditions */
- if (hostdata->status[0] >> 1 == CHECK_CONDITION ||
- hostdata->status[0] >> 1 == COMMAND_TERMINATED) {
+ if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION ||
+ hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) {
struct NCR_700_command_slot *slot =
(struct NCR_700_command_slot *)SCp->host_scribble;
if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 0464e37c806a..90253208a72f 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -40,7 +40,7 @@ struct sccb_mgr_info {
u16 si_per_targ_ultra_nego;
u16 si_per_targ_no_disc;
u16 si_per_targ_wide_nego;
- u16 si_flags;
+ u16 si_mflags;
unsigned char si_card_family;
unsigned char si_bustype;
unsigned char si_card_model[3];
@@ -304,40 +304,12 @@ typedef struct SCCBscam_info {
} SCCBSCAM_INFO;
-#define SCSI_REQUEST_SENSE 0x03
-#define SCSI_READ 0x08
-#define SCSI_WRITE 0x0A
-#define SCSI_START_STOP_UNIT 0x1B
-#define SCSI_READ_EXTENDED 0x28
-#define SCSI_WRITE_EXTENDED 0x2A
-#define SCSI_WRITE_AND_VERIFY 0x2E
-
-#define SSGOOD 0x00
-#define SSCHECK 0x02
-#define SSQ_FULL 0x28
-
-#define SMCMD_COMP 0x00
-#define SMEXT 0x01
-#define SMSAVE_DATA_PTR 0x02
-#define SMREST_DATA_PTR 0x03
-#define SMDISC 0x04
-#define SMABORT 0x06
-#define SMREJECT 0x07
-#define SMNO_OP 0x08
-#define SMPARITY 0x09
-#define SMDEV_RESET 0x0C
-#define SMABORT_TAG 0x0D
-#define SMINIT_RECOVERY 0x0F
-#define SMREL_RECOVERY 0x10
#define SMIDENT 0x80
#define DISC_PRIV 0x40
-#define SMSYNC 0x01
-#define SMWDTR 0x03
#define SM8BIT 0x00
#define SM16BIT 0x01
-#define SMIGNORWR 0x23 /* Ignore Wide Residue */
#define SIX_BYTE_CMD 0x06
#define TWELVE_BYTE_CMD 0x0C
@@ -1073,22 +1045,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
ScamFlg =
(unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
- pCardInfo->si_flags = 0x0000;
+ pCardInfo->si_mflags = 0x0000;
if (i & 0x01)
- pCardInfo->si_flags |= SCSI_PARITY_ENA;
+ pCardInfo->si_mflags |= SCSI_PARITY_ENA;
if (!(i & 0x02))
- pCardInfo->si_flags |= SOFT_RESET;
+ pCardInfo->si_mflags |= SOFT_RESET;
if (i & 0x10)
- pCardInfo->si_flags |= EXTENDED_TRANSLATION;
+ pCardInfo->si_mflags |= EXTENDED_TRANSLATION;
if (ScamFlg & SCAM_ENABLED)
- pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
+ pCardInfo->si_mflags |= FLAG_SCAM_ENABLED;
if (ScamFlg & SCAM_LEVEL2)
- pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
+ pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2;
j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
if (i & 0x04) {
@@ -1104,7 +1076,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
- pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
+ pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN;
pCardInfo->si_card_family = HARPOON_FAMILY;
pCardInfo->si_bustype = BUSTYPE_PCI;
@@ -1140,15 +1112,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
if (pCardInfo->si_card_model[1] == '3') {
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
} else if (pCardInfo->si_card_model[2] == '0') {
temp = RD_HARPOON(ioport + hp_xfer_pad);
WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
- pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
WR_HARPOON(ioport + hp_xfer_pad, temp);
} else {
temp = RD_HARPOON(ioport + hp_ee_ctrl);
@@ -1166,9 +1138,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
WR_HARPOON(ioport + hp_ee_ctrl, temp);
WR_HARPOON(ioport + hp_xfer_pad, temp2);
if (!(temp3 & BIT(7)))
- pCardInfo->si_flags |= LOW_BYTE_TERM;
+ pCardInfo->si_mflags |= LOW_BYTE_TERM;
if (!(temp3 & BIT(6)))
- pCardInfo->si_flags |= HIGH_BYTE_TERM;
+ pCardInfo->si_mflags |= HIGH_BYTE_TERM;
}
ARAM_ACCESS(ioport);
@@ -1275,7 +1247,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
CurrCard->ourId = pCardInfo->si_id;
- i = (unsigned char)pCardInfo->si_flags;
+ i = (unsigned char)pCardInfo->si_mflags;
if (i & SCSI_PARITY_ENA)
WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
@@ -1289,14 +1261,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
j |= SCSI_TERM_ENA_H;
WR_HARPOON(ioport + hp_ee_ctrl, j);
- if (!(pCardInfo->si_flags & SOFT_RESET)) {
+ if (!(pCardInfo->si_mflags & SOFT_RESET)) {
FPT_sresb(ioport, thisCard);
FPT_scini(thisCard, pCardInfo->si_id, 0);
}
- if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
+ if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS)
CurrCard->globalFlags |= F_NO_FILTER;
if (pCurrNvRam) {
@@ -1660,7 +1632,7 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
p_Sccb->Sccb_scsistat =
ABORT_ST;
p_Sccb->Sccb_scsimsg =
- SMABORT_TAG;
+ ABORT_TASK;
if (((struct sccb_card *)
pCurrCard)->currentSCCB ==
@@ -1812,7 +1784,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
FPT_phaseChkFifo(ioport, thisCard);
if (RD_HARPOON(ioport + hp_gp_reg_1) ==
- SMSAVE_DATA_PTR) {
+ SAVE_POINTERS) {
WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
currSCCB->Sccb_XferState |= F_NO_DATA_YET;
@@ -1865,7 +1837,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
FPT_phaseChkFifo(ioport, thisCard);
if (RD_HARPOON(ioport + hp_gp_reg_1) ==
- SMSAVE_DATA_PTR) {
+ SAVE_POINTERS) {
WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
currSCCB->Sccb_XferState |=
F_NO_DATA_YET;
@@ -2258,7 +2230,7 @@ static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB)
WR_HARPOON(port + hp_fiforead, 0);
WR_HARPOON(port + hp_fifowrite, 0);
if (pCurrSCCB != NULL) {
- pCurrSCCB->Sccb_scsimsg = SMPARITY;
+ pCurrSCCB->Sccb_scsimsg = MSG_PARITY_ERROR;
}
message = 0x00;
do {
@@ -2411,7 +2383,7 @@ static void FPT_ssel(u32 port, unsigned char p_card)
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP);
- currSCCB->Sccb_scsimsg = SMDEV_RESET;
+ currSCCB->Sccb_scsimsg = TARGET_RESET;
WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
auto_loaded = 1;
@@ -2758,9 +2730,9 @@ static void FPT_sres(u32 port, unsigned char p_card,
if (message == 0) {
msgRetryCount++;
if (msgRetryCount == 1) {
- FPT_SendMsg(port, SMPARITY);
+ FPT_SendMsg(port, MSG_PARITY_ERROR);
} else {
- FPT_SendMsg(port, SMDEV_RESET);
+ FPT_SendMsg(port, TARGET_RESET);
FPT_sssyncv(port, our_target, NARROW_SCSI,
currTar_Info);
@@ -2860,8 +2832,8 @@ static void FPT_SendMsg(u32 port, unsigned char message)
WR_HARPOON(port + hp_portctrl_0, 0x00);
- if ((message == SMABORT) || (message == SMDEV_RESET) ||
- (message == SMABORT_TAG)) {
+ if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
+ (message == ABORT_TASK)) {
while (!
(RDW_HARPOON((port + hp_intstat)) &
(BUS_FREE | PHASE))) {
@@ -2893,7 +2865,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
- if (message == SMREST_DATA_PTR) {
+ if (message == RESTORE_POINTERS) {
if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) {
currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;
@@ -2905,7 +2877,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
(AUTO_IMMED + DISCONNECT_START));
}
- else if (message == SMCMD_COMP) {
+ else if (message == COMMAND_COMPLETE) {
if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
currTar_Info->TarStatus &=
@@ -2917,15 +2889,16 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
- else if ((message == SMNO_OP) || (message >= SMIDENT)
- || (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY)) {
+ else if ((message == NOP) || (message >= IDENTIFY_BASE) ||
+ (message == INITIATE_RECOVERY) ||
+ (message == RELEASE_RECOVERY)) {
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
- else if (message == SMREJECT) {
+ else if (message == MESSAGE_REJECT) {
if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
(currSCCB->Sccb_scsistat == SELECT_WN_ST) ||
@@ -3026,19 +2999,19 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
}
- else if (message == SMEXT) {
+ else if (message == EXTENDED_MESSAGE) {
ACCEPT_MSG(port);
FPT_shandem(port, p_card, currSCCB);
}
- else if (message == SMIGNORWR) {
+ else if (message == IGNORE_WIDE_RESIDUE) {
ACCEPT_MSG(port); /* ACK the RESIDUE MSG */
message = FPT_sfm(port, currSCCB);
- if (currSCCB->Sccb_scsimsg != SMPARITY)
+ if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
@@ -3047,7 +3020,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
else {
currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
- currSCCB->Sccb_scsimsg = SMREJECT;
+ currSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
@@ -3073,7 +3046,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
message = FPT_sfm(port, pCurrSCCB);
if (message) {
- if (message == SMSYNC) {
+ if (message == EXTENDED_SDTR) {
if (length == 0x03) {
@@ -3081,10 +3054,10 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
FPT_stsyncn(port, p_card);
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
}
- } else if (message == SMWDTR) {
+ } else if (message == EXTENDED_WDTR) {
if (length == 0x02) {
@@ -3092,7 +3065,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
FPT_stwidn(port, p_card);
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
@@ -3101,20 +3074,20 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
}
} else {
- pCurrSCCB->Sccb_scsimsg = SMREJECT;
+ pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
ACCEPT_MSG_ATN(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
} else {
- if (pCurrSCCB->Sccb_scsimsg != SMPARITY)
+ if (pCurrSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
} else {
- if (pCurrSCCB->Sccb_scsimsg == SMPARITY)
+ if (pCurrSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
}
@@ -3148,10 +3121,10 @@ static unsigned char FPT_sisyncn(u32 port, unsigned char p_card,
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
WRW_HARPOON((port + SYNC_MSGS + 0),
- (MPM_OP + AMSG_OUT + SMEXT));
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
WRW_HARPOON((port + SYNC_MSGS + 4),
- (MPM_OP + AMSG_OUT + SMSYNC));
+ (MPM_OP + AMSG_OUT + EXTENDED_SDTR));
if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
@@ -3221,7 +3194,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
sync_msg = FPT_sfm(port, currSCCB);
- if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3231,7 +3204,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
offset = FPT_sfm(port, currSCCB);
- if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3343,9 +3316,11 @@ static void FPT_sisyncr(u32 port, unsigned char sync_pulse,
unsigned char offset)
{
ARAM_ACCESS(port);
- WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
- WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMSYNC));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + EXTENDED_SDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse));
WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset));
@@ -3388,10 +3363,10 @@ static unsigned char FPT_siwidn(u32 port, unsigned char p_card)
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
WRW_HARPOON((port + SYNC_MSGS + 0),
- (MPM_OP + AMSG_OUT + SMEXT));
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
WRW_HARPOON((port + SYNC_MSGS + 4),
- (MPM_OP + AMSG_OUT + SMWDTR));
+ (MPM_OP + AMSG_OUT + EXTENDED_WDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 8),
(MPM_OP + AMSG_OUT + SM16BIT));
@@ -3436,7 +3411,7 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
width = FPT_sfm(port, currSCCB);
- if ((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
+ if ((width == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
return;
@@ -3499,9 +3474,11 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
static void FPT_siwidr(u32 port, unsigned char width)
{
ARAM_ACCESS(port);
- WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
+ WRW_HARPOON((port + SYNC_MSGS + 0),
+ (MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
- WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMWDTR));
+ WRW_HARPOON((port + SYNC_MSGS + 4),
+ (MPM_OP + AMSG_OUT + EXTENDED_WDTR));
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width));
WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));
@@ -3682,7 +3659,7 @@ static void FPT_ssenss(struct sccb_card *pCurrCard)
}
currSCCB->CdbLength = SIX_BYTE_CMD;
- currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
+ currSCCB->Cdb[0] = REQUEST_SENSE;
currSCCB->Cdb[1] = currSCCB->Cdb[1] & (unsigned char)0xE0; /*Keep LUN. */
currSCCB->Cdb[2] = 0x00;
currSCCB->Cdb[3] = 0x00;
@@ -3939,13 +3916,9 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
*/
if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
(currTar_Info->TarStatus & TAG_Q_TRYING)) {
- p_sccb->Sccb_idmsg =
- (unsigned char)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
- }
-
- else {
-
- p_sccb->Sccb_idmsg = (unsigned char)SMIDENT | p_sccb->Lun;
+ p_sccb->Sccb_idmsg = IDENTIFY(true, p_sccb->Lun);
+ } else {
+ p_sccb->Sccb_idmsg = IDENTIFY(false, p_sccb->Lun);
}
p_sccb->HostStatus = 0x00;
@@ -3962,7 +3935,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
*/
p_sccb->Sccb_scsistat = BUS_FREE_ST;
p_sccb->SccbStatus = SCCB_IN_PROCESS;
- p_sccb->Sccb_scsimsg = SMNO_OP;
+ p_sccb->Sccb_scsimsg = NOP;
}
@@ -4167,7 +4140,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
message = currSCCB->Sccb_scsimsg;
scsiID = currSCCB->TargID;
- if (message == SMDEV_RESET) {
+ if (message == TARGET_RESET) {
currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
currTar_Info->TarSyncCtrl = 0;
@@ -4203,7 +4176,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
else if (currSCCB->Sccb_scsistat < COMMAND_ST) {
- if (message == SMNO_OP) {
+ if (message == NOP) {
currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;
FPT_ssel(port, p_card);
@@ -4211,13 +4184,13 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
}
} else {
- if (message == SMABORT)
+ if (message == ABORT_TASK_SET)
FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
}
} else {
- message = SMABORT;
+ message = ABORT_TASK_SET;
}
WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
@@ -4232,8 +4205,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
WR_HARPOON(port + hp_portctrl_0, 0x00);
- if ((message == SMABORT) || (message == SMDEV_RESET) ||
- (message == SMABORT_TAG)) {
+ if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
+ (message == ABORT_TASK)) {
while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) {
}
@@ -4275,8 +4248,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
else {
- if (message == SMPARITY) {
- currSCCB->Sccb_scsimsg = SMNO_OP;
+ if (message == MSG_PARITY_ERROR) {
+ currSCCB->Sccb_scsimsg = NOP;
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
} else {
@@ -4306,7 +4279,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
}
message = RD_HARPOON(port + hp_scsidata_0);
- if ((message == SMDISC) || (message == SMSAVE_DATA_PTR)) {
+ if ((message == DISCONNECT) || (message == SAVE_POINTERS)) {
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + END_DATA_START));
@@ -4321,7 +4294,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
FPT_sdecm(message, port, p_card);
} else {
- if (currSCCB->Sccb_scsimsg != SMPARITY)
+ if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
ACCEPT_MSG(port);
WR_HARPOON(port + hp_autostart_1,
(AUTO_IMMED + DISCONNECT_START));
@@ -4351,7 +4324,7 @@ static void FPT_phaseIllegal(u32 port, unsigned char p_card)
currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
currSCCB->Sccb_scsistat = ABORT_ST;
- currSCCB->Sccb_scsimsg = SMABORT;
+ currSCCB->Sccb_scsimsg = ABORT_TASK_SET;
}
ACCEPT_MSG_ATN(port);
@@ -4650,9 +4623,9 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0;
- if (status_byte != SSGOOD) {
+ if (status_byte != SAM_STAT_GOOD) {
- if (status_byte == SSQ_FULL) {
+ if (status_byte == SAM_STAT_TASK_SET_FULL) {
if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
@@ -4784,7 +4757,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
}
- if (status_byte == SSCHECK) {
+ if (status_byte == SAM_STAT_CHECK_CONDITION) {
if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) {
if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
TarEEValue & EE_SYNC_MASK) {
@@ -4806,7 +4779,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
currSCCB->SccbStatus = SCCB_ERROR;
currSCCB->TargetStatus = status_byte;
- if (status_byte == SSCHECK) {
+ if (status_byte == SAM_STAT_CHECK_CONDITION) {
FPT_sccbMgrTbl[p_card][currSCCB->TargID].
TarLUN_CA = 1;
@@ -6868,14 +6841,14 @@ static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
if ((p_sccb->
ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN))
&& (p_sccb->HostStatus == SCCB_COMPLETE)
- && (p_sccb->TargetStatus != SSCHECK))
-
- if ((SCSIcmd == SCSI_READ) ||
- (SCSIcmd == SCSI_WRITE) ||
- (SCSIcmd == SCSI_READ_EXTENDED) ||
- (SCSIcmd == SCSI_WRITE_EXTENDED) ||
- (SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
- (SCSIcmd == SCSI_START_STOP_UNIT) ||
+ && (p_sccb->TargetStatus != SAM_STAT_CHECK_CONDITION))
+
+ if ((SCSIcmd == READ_6) ||
+ (SCSIcmd == WRITE_6) ||
+ (SCSIcmd == READ_10) ||
+ (SCSIcmd == WRITE_10) ||
+ (SCSIcmd == WRITE_VERIFY) ||
+ (SCSIcmd == START_STOP) ||
(pCurrCard->globalFlags & F_NO_FILTER)
)
p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
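Aside (not part of the patch): the FPT_sinits() hunk above swaps the open-coded (SMIDENT | DISC_PRIV) | Lun packing for the standard IDENTIFY() helper. A minimal sketch of what that macro yields, per its definition in <scsi/scsi.h>:

    #include <linux/types.h>
    #include <scsi/scsi.h>	/* IDENTIFY(), IDENTIFY_BASE */

    /* IDENTIFY(can_disconnect, lun) builds the SPI IDENTIFY message byte:
     * bit 7 always set, bit 6 = disconnect privilege, low three bits = LUN.
     */
    static unsigned char example_idmsg(bool can_disconnect, unsigned char lun)
    {
    	return IDENTIFY(can_disconnect, lun); /* 0x80 | (cd ? 0x40 : 0) | (lun & 0x07) */
    }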
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3d114be5b662..4dc42a8ff71a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -235,6 +235,19 @@ config SCSI_FC_ATTRS
each attached FiberChannel device to sysfs, say Y.
Otherwise, say N.
+config FC_APPID
+ bool "Enable support to track FC I/O Traffic"
+ depends on BLOCK && BLK_CGROUP
+ depends on SCSI
+ select BLK_CGROUP_FC_APPID
+ default y
+ help
+ If you say Y here, it enables the support to track
+ FC I/O traffic over fabric. It enables the Fabric and the
+ storage targets to identify, monitor, and handle FC traffic
+ based on VM tags by inserting application specific
+ identification into the FC frame.
+
config SCSI_ISCSI_ATTRS
tristate "iSCSI Transport Attributes"
depends on SCSI && NET
@@ -311,7 +324,7 @@ source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
depends on SGI_HAS_WD93 && SCSI
- help
+ help
If you have a Western Digital WD93 SCSI controller on
an SGI MIPS system, say Y. Otherwise, say N.
@@ -482,6 +495,7 @@ config SCSI_ARCMSR
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
+source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
@@ -1157,6 +1171,8 @@ config SCSI_LPFC_DEBUG_FS
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
+source "drivers/scsi/elx/Kconfig"
+
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on EISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index bc3882f5cc69..1748d1ec1338 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
+obj-$(CONFIG_SCSI_EFCT) += elx/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 2ddbcaa667d1..3baadd068768 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -538,11 +538,11 @@ static void complete_cmd(struct Scsi_Host *instance,
if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
- if (status_byte(cmd->result) != GOOD) {
+ if (get_status_byte(cmd) != SAM_STAT_GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
} else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_driver_byte(cmd, DRIVER_SENSE);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
}
hostdata->sensing = NULL;
}
@@ -1815,6 +1815,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
switch (tmp) {
case ABORT:
+ set_host_byte(cmd, DID_ABORT);
+ fallthrough;
case COMMAND_COMPLETE:
/* Accept message by clearing ACK */
sink = 1;
@@ -1826,9 +1828,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
- cmd->result &= ~0xffff;
- cmd->result |= cmd->SCp.Status;
- cmd->result |= cmd->SCp.Message << 8;
+ set_status_byte(cmd, cmd->SCp.Status);
set_resid_from_SCp(cmd);
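Aside (not part of the patch): the NCR5380, aha152x, acornscsi, fas216 and dc395x hunks all stop packing cmd->result by hand. A minimal sketch, assuming the 5.14-era helpers from <scsi/scsi_cmnd.h>, of the replacement idiom (example_complete() is illustrative only):

    #include <scsi/scsi_cmnd.h>

    static void example_complete(struct scsi_cmnd *cmd, u8 sam_status,
    				 enum scsi_host_status host)
    {
    	/* old idiom: cmd->result = (host << 16) | (msg << 8) | sam_status; */
    	set_host_byte(cmd, host);		/* bits 23:16 of cmd->result */
    	set_status_byte(cmd, sam_status);	/* bits 7:0, a SAM_STAT_* value */

    	/* readers use the matching accessors instead of masking by hand */
    	if (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)
    		pr_debug("sense data expected in cmd->sense_buffer\n");
    }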
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index f1f62b5da8b7..46b8dffce2dd 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1235,8 +1235,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
if (ret < 0)
return ret;
command = ContainerRawIo2;
- fibsize = sizeof(struct aac_raw_io2) +
- ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ fibsize = struct_size(readcmd2, sge,
+ le32_to_cpu(readcmd2->sgeCnt));
} else {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(fib);
@@ -1366,8 +1366,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
if (ret < 0)
return ret;
command = ContainerRawIo2;
- fibsize = sizeof(struct aac_raw_io2) +
- ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ fibsize = struct_size(writecmd2, sge,
+ le32_to_cpu(writecmd2->sgeCnt));
} else {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(fib);
@@ -3998,7 +3998,7 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
if (aac_convert_sgl == 0)
return 0;
- sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
+ sge = kmalloc_array(nseg_new, sizeof(*sge), GFP_ATOMIC);
if (sge == NULL)
return -ENOMEM;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index e3e4ecbea726..3733df77bc65 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1929,7 +1929,7 @@ struct aac_raw_io2 {
u8 bpComplete; /* reserved for F/W use */
u8 sgeFirstIndex; /* reserved for F/W use */
u8 unused[4];
- struct sge_ieee1212 sge[1];
+ struct sge_ieee1212 sge[];
};
#define CT_FLUSH_CACHE 129
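Aside (not part of the patch): with sge[] now a flexible array member, the aachba.c hunks compute the FIB size with struct_size(). A minimal equivalent, using the helper from <linux/overflow.h>:

    #include <linux/overflow.h>

    /* Effectively the same byte count as the old
     *   sizeof(struct aac_raw_io2) + (sgeCnt - 1) * sizeof(struct sge_ieee1212)
     * expression, but derived from the flexible array member and checked
     * for multiplication overflow.
     */
    static size_t raw_io2_fibsize(const struct aac_raw_io2 *rio2, u32 sge_cnt)
    {
    	return struct_size(rio2, sge, sge_cnt);
    }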
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 800052f10699..f3377e2ef5fb 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -5964,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- set_driver_byte(scp, DRIVER_SENSE);
}
break;
@@ -6715,7 +6714,6 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- set_driver_byte(scp, DRIVER_SENSE);
}
break;
@@ -6730,14 +6728,12 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
case QD_ABORTED_BY_HOST:
ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
set_status_byte(scp, qdonep->d3.scsi_stat);
- set_msg_byte(scp, qdonep->d3.scsi_msg);
set_host_byte(scp, DID_ABORT);
break;
default:
ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat);
set_status_byte(scp, qdonep->d3.scsi_stat);
- set_msg_byte(scp, qdonep->d3.scsi_msg);
set_host_byte(scp, DID_ERROR);
break;
}
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index d8e19afa7a14..b13b5c85f3de 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -619,7 +619,8 @@ static struct {
static irqreturn_t intr(int irq, void *dev_id);
static void reset_ports(struct Scsi_Host *shpnt);
static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
-static void done(struct Scsi_Host *shpnt, int error);
+static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
+ unsigned char host_byte);
/* diagnostics */
static void show_command(struct scsi_cmnd * ptr);
@@ -1271,7 +1272,8 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
* Internal done function
*
*/
-static void done(struct Scsi_Host *shpnt, int error)
+static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
+ unsigned char host_byte)
{
if (CURRENT_SC) {
if(DONE_SC)
@@ -1281,7 +1283,8 @@ static void done(struct Scsi_Host *shpnt, int error)
DONE_SC = CURRENT_SC;
CURRENT_SC = NULL;
- DONE_SC->result = error;
+ set_status_byte(DONE_SC, status_byte);
+ set_host_byte(DONE_SC, host_byte);
} else
printk(KERN_ERR "aha152x: done() called outside of command\n");
}
@@ -1376,13 +1379,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
if(CURRENT_SC->SCp.phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
+ done(shpnt, CURRENT_SC->SCp.Status, DID_OK);
} else if(CURRENT_SC->SCp.phase & aborted) {
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16));
+ done(shpnt, CURRENT_SC->SCp.Status, DID_ABORT);
} else if(CURRENT_SC->SCp.phase & resetted) {
- done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16));
+ done(shpnt, CURRENT_SC->SCp.Status, DID_RESET);
} else if(CURRENT_SC->SCp.phase & disconnected) {
/* target sent DISCONNECT */
@@ -1394,7 +1397,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
CURRENT_SC = NULL;
} else {
- done(shpnt, DID_ERROR << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ERROR);
}
#if defined(AHA152X_STAT)
} else {
@@ -1515,7 +1518,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SELDO)) {
scmd_printk(KERN_ERR, CURRENT_SC,
"aha152x: passing bus free condition\n");
- done(shpnt, DID_NO_CONNECT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
return;
}
@@ -1552,12 +1555,12 @@ static void selto_run(struct Scsi_Host *shpnt)
CURRENT_SC->SCp.phase &= ~selecting;
if (CURRENT_SC->SCp.phase & aborted)
- done(shpnt, DID_ABORT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
- done(shpnt, DID_BUS_BUSY << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
else
/* ARBITRATION won, but SELECTION failed */
- done(shpnt, DID_NO_CONNECT << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
}
/*
@@ -1891,7 +1894,7 @@ static void cmd_init(struct Scsi_Host *shpnt)
if (CURRENT_SC->SCp.sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
- done(shpnt, DID_ERROR << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_ERROR);
return;
}
@@ -2231,7 +2234,7 @@ static int update_state(struct Scsi_Host *shpnt)
static void parerr_run(struct Scsi_Host *shpnt)
{
scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n");
- done(shpnt, DID_PARITY << 16);
+ done(shpnt, SAM_STAT_GOOD, DID_PARITY);
}
/*
@@ -2254,7 +2257,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
- ptr->result = DID_RESET << 16;
+ set_host_byte(ptr, DID_RESET);
ptr->scsi_done(ptr);
}
@@ -2262,7 +2265,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
}
if(CURRENT_SC && !CURRENT_SC->device->soft_reset)
- done(shpnt, DID_RESET << 16 );
+ done(shpnt, SAM_STAT_GOOD, DID_RESET);
}
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 0dc831026e9e..39d8759fe558 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -267,8 +267,11 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
guarantee that we will still have it in the
cdb when we come back */
if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
- memcpy(SCtmp->sense_buffer, ecbptr->sense,
- SCSI_SENSE_BUFFERSIZE);
+ memcpy_and_pad(SCtmp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ ecbptr->sense,
+ sizeof(ecbptr->sense),
+ 0);
errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
} else
errstatus = 0;
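Aside (not part of the patch): the aha1740 and arcmsr hunks switch sense-buffer copies to memcpy_and_pad(). A minimal sketch of the idiom, per the helper in <linux/string.h>:

    #include <linux/types.h>
    #include <linux/string.h>

    /* Copy a hardware sense buffer that may be shorter than the midlayer's
     * SCSI_SENSE_BUFFERSIZE destination; the destination tail is padded with
     * zeroes instead of being left stale or read past the source.
     */
    static void example_copy_sense(u8 *dst, size_t dst_len,
    			       const u8 *hw_sense, size_t hw_len)
    {
    	memcpy_and_pad(dst, dst_len, hw_sense, hw_len, 0);
    }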
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 4f7102f8eeb0..92ea24a075b8 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1928,7 +1928,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
memcpy(cmd->sense_buffer,
ahd_get_sense_buf(ahd, scb)
+ sense_offset, sense_size);
- cmd->result |= (DRIVER_SENSE << 24);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
#ifdef AHD_DEBUG
if (ahd_debug & AHD_SHOW_SENSE) {
@@ -2018,6 +2018,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
int new_status = DID_OK;
int do_fallback = 0;
int scsi_status;
+ struct scsi_sense_data *sense;
/*
* Map CAM error codes into Linux Error codes. We
@@ -2041,18 +2042,12 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
switch(scsi_status) {
case SAM_STAT_COMMAND_TERMINATED:
case SAM_STAT_CHECK_CONDITION:
- if ((cmd->result >> 24) != DRIVER_SENSE) {
+ sense = (struct scsi_sense_data *)
+ cmd->sense_buffer;
+ if (sense->extra_len >= 5 &&
+ (sense->add_sense_code == 0x47
+ || sense->add_sense_code == 0x48))
do_fallback = 1;
- } else {
- struct scsi_sense_data *sense;
-
- sense = (struct scsi_sense_data *)
- cmd->sense_buffer;
- if (sense->extra_len >= 5 &&
- (sense->add_sense_code == 0x47
- || sense->add_sense_code == 0x48))
- do_fallback = 1;
- }
break;
default:
break;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d33f5a00bf0b..8b3d472aa3cc 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1838,7 +1838,6 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
if (sense_size < SCSI_SENSE_BUFFERSIZE)
memset(&cmd->sense_buffer[sense_size], 0,
SCSI_SENSE_BUFFERSIZE - sense_size);
- cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHC_DEBUG
if (ahc_debug & AHC_SHOW_SENSE) {
int i;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 71d18f607dae..c6b63eae28f5 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -205,7 +205,7 @@ Again:
switch (opcode) {
case TC_NO_ERROR:
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
break;
case TC_UNDERRUN:
ts->resp = SAS_TASK_COMPLETE;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 0f6abd233614..6ce57f031df5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.50.00.02-20200819"
+#define ARCMSR_DRIVER_VERSION "v1.50.00.05-20210429"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4b79661275c9..ec1a834c922d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1323,19 +1323,21 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
-
struct scsi_cmnd *pcmd = ccb->pcmd;
- struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (sensebuffer) {
- int sense_data_length =
- sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
- ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
- memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
- memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
+
+ pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ if (pcmd->sense_buffer) {
+ struct SENSE_DATA *sensebuffer;
+
+ memcpy_and_pad(pcmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
+ ccb->arcmsr_cdb.SenseData,
+ sizeof(ccb->arcmsr_cdb.SenseData),
+ 0);
+
+ sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
- pcmd->result |= (DRIVER_SENSE << 24);
}
}
@@ -1923,8 +1925,12 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
if (ccb->arc_cdb_size <= 0x300)
arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
- else
- arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1;
+ else {
+ arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
+ if (arc_cdb_size > 0xF)
+ arc_cdb_size = 0xF;
+ arc_cdb_size = (arc_cdb_size << 1) | 1;
+ }
ccb_post_stamp = (ccb->smid | arc_cdb_size);
writel(0, &pmu->inbound_queueport_high);
writel(ccb_post_stamp, &pmu->inbound_queueport_low);
@@ -2415,10 +2421,17 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
- uint32_t outbound_doorbell, in_doorbell, tmp;
+ uint32_t outbound_doorbell, in_doorbell, tmp, i;
struct MessageUnit_E __iomem *reg = pACB->pmuE;
- in_doorbell = readl(&reg->iobound_doorbell);
+ if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
+ for (i = 0; i < 5; i++) {
+ in_doorbell = readl(&reg->iobound_doorbell);
+ if (in_doorbell != 0)
+ break;
+ }
+ } else
+ in_doorbell = readl(&reg->iobound_doorbell);
outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
do {
writel(0, &reg->host_int_status); /* clear interrupt */
@@ -3243,7 +3256,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
- cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
+ cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_done(cmd);
return 0;
}
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 248a5bfad153..84fc7a0c6ff4 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -794,7 +794,10 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
acornscsi_dma_cleanup(host);
- SCpnt->result = result << 16 | host->scsi.SCp.Message << 8 | host->scsi.SCp.Status;
+ set_host_byte(SCpnt, result);
+ if (result == DID_OK)
+ scsi_msg_to_host_byte(SCpnt, host->scsi.SCp.Message);
+ set_status_byte(SCpnt, host->scsi.SCp.Status);
/*
* In theory, this should not happen. In practice, it seems to.
@@ -833,12 +836,12 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
xfer_warn = 0;
if (xfer_warn) {
- switch (status_byte(SCpnt->result)) {
- case CHECK_CONDITION:
- case COMMAND_TERMINATED:
- case BUSY:
- case QUEUE_FULL:
- case RESERVATION_CONFLICT:
+ switch (get_status_byte(SCpnt)) {
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_BUSY:
+ case SAM_STAT_TASK_SET_FULL:
+ case SAM_STAT_RESERVATION_CONFLICT:
break;
default:
@@ -2470,7 +2473,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
host->host->host_no, '0' + SCpnt->device->id);
- SCpnt->result = DID_NO_CONNECT << 16;
+ set_host_byte(SCpnt, DID_NO_CONNECT);
done(SCpnt);
return 0;
}
@@ -2492,7 +2495,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
unsigned long flags;
if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) {
- SCpnt->result = DID_ERROR << 16;
+ set_host_byte(SCpnt, DID_ERROR);
done(SCpnt);
return 0;
}
@@ -2506,31 +2509,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
DEF_SCSI_QCMD(acornscsi_queuecmd)
-/*
- * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result)
- * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2
- * Params : SCpntp1 - pointer to command to return
- * SCpntp2 - pointer to command to check
- * result - result to pass back to mid-level done function
- * Returns : *SCpntp2 = NULL if *SCpntp1 is the same command structure as *SCpntp2.
- */
-static inline void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1,
- struct scsi_cmnd **SCpntp2,
- int result)
-{
- struct scsi_cmnd *SCpnt = *SCpntp1;
-
- if (SCpnt) {
- *SCpntp1 = NULL;
-
- SCpnt->result = result;
- SCpnt->scsi_done(SCpnt);
- }
-
- if (SCpnt == *SCpntp2)
- *SCpntp2 = NULL;
-}
-
enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };
/*
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 2e687ce60753..30ed3d23635a 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -1479,7 +1479,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne
if (msgqueue_msglength(&info->scsi.msgs) > 1)
fas216_cmd(info, CMD_SETATN);
- /*FALLTHROUGH*/
+ fallthrough;
/*
* Any -> Message Out
@@ -2042,8 +2042,10 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
{
info->stats.fins += 1;
- SCpnt->result = result << 16 | info->scsi.SCp.Message << 8 |
- info->scsi.SCp.Status;
+ set_host_byte(SCpnt, result);
+ if (result == DID_OK)
+ scsi_msg_to_host_byte(SCpnt, info->scsi.SCp.Message);
+ set_status_byte(SCpnt, info->scsi.SCp.Status);
fas216_log_command(info, LOG_CONNECT, SCpnt,
"command complete, result=0x%08x", SCpnt->result);
@@ -2051,23 +2053,22 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
/*
* If the driver detected an error, we're all done.
*/
- if (host_byte(SCpnt->result) != DID_OK ||
- msg_byte(SCpnt->result) != COMMAND_COMPLETE)
+ if (get_host_byte(SCpnt) != DID_OK)
goto done;
/*
* If the command returned CHECK_CONDITION or COMMAND_TERMINATED
* status, request the sense information.
*/
- if (status_byte(SCpnt->result) == CHECK_CONDITION ||
- status_byte(SCpnt->result) == COMMAND_TERMINATED)
+ if (get_status_byte(SCpnt) == SAM_STAT_CHECK_CONDITION ||
+ get_status_byte(SCpnt) == SAM_STAT_COMMAND_TERMINATED)
goto request_sense;
/*
* If the command did not complete with GOOD status,
* we are all done here.
*/
- if (status_byte(SCpnt->result) != GOOD)
+ if (get_status_byte(SCpnt) != SAM_STAT_GOOD)
goto done;
/*
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 0e935c49b57b..8aeaddc93b16 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
uint16_t cri_index;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep = ep->dd_data;
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
if (beiscsi_ep->phba != phba) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);
-
- return -EEXIST;
+ rc = -EEXIST;
+ goto put_ep;
}
cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
if (phba->conn_table[cri_index]) {
@@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep->ep_cid,
beiscsi_conn,
phba->conn_table[cri_index]);
- return -EINVAL;
+ rc = -EINVAL;
+ goto put_ep;
}
}
@@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
"BS_%d : cid %d phba->conn_table[%u]=%p\n",
beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
phba->conn_table[cri_index] = beiscsi_conn;
- return 0;
+
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
@@ -1293,7 +1300,6 @@ static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep)
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct beiscsi_endpoint *beiscsi_ep;
- struct beiscsi_conn *beiscsi_conn;
struct beiscsi_hba *phba;
uint16_t cri_index;
@@ -1312,11 +1318,6 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
return;
}
- if (beiscsi_ep->conn) {
- beiscsi_conn = beiscsi_ep->conn;
- iscsi_suspend_queue(beiscsi_conn->conn);
- }
-
if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : HBA in error 0x%lx\n", phba->state);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 22cf7f4b8d8c..310b801c6c87 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -416,7 +416,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
return NULL;
}
- shost->max_id = BE2_MAX_SESSIONS;
+ shost->max_id = BE2_MAX_SESSIONS - 1;
shost->max_channel = 0;
shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
shost->max_lun = BEISCSI_NUM_MAX_LUN;
@@ -3858,8 +3858,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
int i, j;
mem_descr = phba->init_mem;
- i = 0;
- j = 0;
for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) {
dma_free_coherent(&phba->pcidev->dev,
@@ -5318,7 +5316,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
/* Re-enable UER. If different TPE occurs then it is recoverable. */
beiscsi_set_uer_feature(phba);
- phba->shost->max_id = phba->params.cxns_per_ctrl;
+ phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {
@@ -5809,6 +5807,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.destroy_session = beiscsi_session_destroy,
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = beiscsi_attr_is_visible,
.set_iface_param = beiscsi_iface_set_param,
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 8439951d95ac..f2c49f0e5c8b 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -871,7 +871,7 @@ enum bfa_port_linkstate_rsn {
/*
* Initially flash content may be fff. On making LUN mask enable and disable
- * state chnage. when report lun command is being processed it goes from
+ * state change. When the report lun command is being processed, it goes from
* BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
* BFA_LUN_MASK_ACTIVE.
*/
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 11c0c3e6f014..4e3cef02f10f 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -369,13 +369,10 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event,
u16 misc, struct fchs_s *fchdr)
{
- struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
-
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];
@@ -389,13 +386,10 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
u32 pld_w0)
{
- struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];
if (plog->plog_enabled) {
- memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
-
ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];
@@ -3173,7 +3167,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
m->port_cfg = fcport->cfg;
m->msgtag = fcport->msgtag;
m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
- m->use_flash_cfg = fcport->use_flash_cfg;
+ m->use_flash_cfg = fcport->use_flash_cfg;
bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1e6d8f62ea3c..1b5f3e143f07 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -791,7 +791,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
return NULL;
shost->dma_boundary = cnic->pcidev->dma_mask;
shost->transportt = bnx2i_scsi_xport_template;
- shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+ shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1;
shost->max_channel = 0;
shost->max_lun = 512;
shost->max_cmd_len = 16;
@@ -1420,17 +1420,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
* Forcefully terminate all in progress connection recovery at the
* earliest, either in bind(), send_pdu(LOGIN), or conn_start()
*/
- if (bnx2i_adapter_ready(hba))
- return -EIO;
+ if (bnx2i_adapter_ready(hba)) {
+ ret_code = -EIO;
+ goto put_ep;
+ }
bnx2i_ep = ep->dd_data;
if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+ (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
/* Peer disconnect via' FIN or RST */
- return -EINVAL;
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ ret_code = -EINVAL;
+ goto put_ep;
+ }
if (bnx2i_ep->hba != hba) {
/* Error - TCP connection does not belong to this device
@@ -1441,7 +1447,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
"belong to hba (%s)\n",
hba->netdev->name);
- return -EEXIST;
+ ret_code = -EEXIST;
+ goto put_ep;
}
bnx2i_ep->conn = bnx2i_conn;
bnx2i_conn->ep = bnx2i_ep;
@@ -1458,6 +1465,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
bnx2i_put_rq_buf(bnx2i_conn, 0);
bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+put_ep:
+ iscsi_put_endpoint(ep);
return ret_code;
}
@@ -2113,7 +2122,6 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
struct bnx2i_endpoint *bnx2i_ep;
struct bnx2i_conn *bnx2i_conn = NULL;
- struct iscsi_conn *conn = NULL;
struct bnx2i_hba *hba;
bnx2i_ep = ep->dd_data;
@@ -2126,11 +2134,8 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
msleep(250);
- if (bnx2i_ep->conn) {
+ if (bnx2i_ep->conn)
bnx2i_conn = bnx2i_ep->conn;
- conn = bnx2i_conn->cls_conn->dd_data;
- iscsi_suspend_queue(conn);
- }
hba = bnx2i_ep->hba;
mutex_lock(&hba->net_dev_lock);
@@ -2276,6 +2281,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.destroy_session = bnx2i_session_destroy,
.create_conn = bnx2i_conn_create,
.bind_conn = bnx2i_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = bnx2i_conn_destroy,
.attr_is_visible = bnx2i_attr_is_visible,
.set_param = iscsi_set_param,
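Aside (not part of the patch): be2iscsi, bnx2i and cxgbi now share the same bind_conn() shape — iscsi_lookup_endpoint() hands back a referenced endpoint, every exit path drops it with iscsi_put_endpoint(), and the transports register iscsi_conn_unbind as .unbind_conn. A minimal sketch of that shape (example_bind_conn() is illustrative only):

    #include <scsi/scsi_transport_iscsi.h>
    #include <scsi/libiscsi.h>

    static int example_bind_conn(struct iscsi_cls_session *cls_session,
    			     struct iscsi_cls_conn *cls_conn,
    			     u64 transport_fd, int is_leading)
    {
    	struct iscsi_endpoint *ep;
    	int rc = 0;

    	ep = iscsi_lookup_endpoint(transport_fd);	/* takes a reference */
    	if (!ep)
    		return -EINVAL;

    	if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
    		rc = -EINVAL;
    		goto put_ep;
    	}
    	/* driver-specific wiring of ep->dd_data to the connection goes here */
    put_ep:
    	iscsi_put_endpoint(ep);		/* balance the lookup on every path */
    	return rc;
    }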
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 9b89c26ccfdb..fc7197abfcdf 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -198,8 +198,9 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
-
- if (driver_byte(result) == DRIVER_SENSE) {
+ if (result < 0)
+ return result;
+ if (scsi_sense_valid(&sshdr)) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
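Aside (not part of the patch): ch, cxlflash and scsi_dh_alua adjust to the same calling convention — scsi_execute_req() may return a negative errno when the command could not be issued, and sense handling keys off scsi_sense_valid() instead of testing driver_byte() for DRIVER_SENSE. A rough sketch under those assumptions (command choice and timeouts are illustrative only):

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_common.h>
    #include <scsi/scsi_proto.h>

    static int example_tur(struct scsi_device *sdev)
    {
    	unsigned char cmd[6] = { TEST_UNIT_READY };
    	struct scsi_sense_hdr sshdr;
    	int result;

    	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
    				  30 * HZ, 3, NULL);
    	if (result < 0)
    		return result;		/* submission failure, negative errno */
    	if (scsi_sense_valid(&sshdr))
    		pr_debug("sense %x/%x/%x\n", sshdr.sense_key,
    			 sshdr.asc, sshdr.ascq);
    	return result;			/* SCSI result word otherwise */
    }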
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 84d73f57292b..340785536998 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -406,14 +406,10 @@ static const char * const hostbyte_table[]={
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" };
-static const char * const driverbyte_table[]={
-"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
-"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
-
const char *scsi_hostbyte_string(int result)
{
+ enum scsi_host_status hb = host_byte(result);
const char *hb_string = NULL;
- int hb = host_byte(result);
if (hb < ARRAY_SIZE(hostbyte_table))
hb_string = hostbyte_table[hb];
@@ -421,17 +417,6 @@ const char *scsi_hostbyte_string(int result)
}
EXPORT_SYMBOL(scsi_hostbyte_string);
-const char *scsi_driverbyte_string(int result)
-{
- const char *db_string = NULL;
- int db = driver_byte(result);
-
- if (db < ARRAY_SIZE(driverbyte_table))
- db_string = driverbyte_table[db];
- return db_string;
-}
-EXPORT_SYMBOL(scsi_driverbyte_string);
-
#define scsi_mlreturn_name(result) { result, #result }
static const struct value_name_pair scsi_mlreturn_arr[] = {
scsi_mlreturn_name(NEEDS_RETRY),
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 203f938fca7e..f949a4e00783 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 2c3491528d42..efb3e2b3398e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
+ .unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f078b3c4e083..8c7d4dda4cf2 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -337,7 +337,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
- unsigned int max_id, struct scsi_host_template *sht,
+ unsigned int max_conns, struct scsi_host_template *sht,
struct scsi_transport_template *stt)
{
struct cxgbi_hba *chba;
@@ -357,7 +357,7 @@ int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
shost->transportt = stt;
shost->max_lun = max_lun;
- shost->max_id = max_id;
+ shost->max_id = max_conns - 1;
shost->max_channel = 0;
shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
@@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
ppm->tformat.pgsz_idx_dflt);
if (err < 0)
- return err;
+ goto put_ep;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
- if (err)
- return -EINVAL;
+ if (err) {
+ err = -EINVAL;
+ goto put_ep;
+ }
/* calculate the tag idx bits needed for this conn based on cmds_max */
cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;
@@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
/* init recv engine */
iscsi_tcp_hdr_recv_prep(tcp_conn);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return err;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
@@ -2968,7 +2972,6 @@ void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
ep, cep, cconn, csk, csk->state, csk->flags);
if (cconn && cconn->iconn) {
- iscsi_suspend_tx(cconn->iconn);
write_lock_bh(&csk->callback_lock);
cep->csk->user_data = NULL;
cconn->cep = NULL;
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ee11ec340654..df0ebabbf387 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -369,8 +369,7 @@ retry:
goto out;
}
- if (driver_byte(result) == DRIVER_SENSE) {
- result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+ if (result > 0 && scsi_sense_valid(&sshdr)) {
if (result & SAM_STAT_CHECK_CONDITION) {
switch (sshdr.sense_key) {
case NO_SENSE:
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index be87d5a7583d..24c7cefb0b78 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -160,22 +160,6 @@
#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
-/* cmd->result */
-#define RES_TARGET 0x000000FF /* Target State */
-#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
-#define RES_ENDMSG 0x0000FF00 /* End Message */
-#define RES_DID 0x00FF0000 /* DID_ codes */
-#define RES_DRV 0xFF000000 /* DRIVER_ codes */
-
-#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
-#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
-
-#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
-#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
-#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
-#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
-#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
-
#define TAG_NONE 255
/*
@@ -986,7 +970,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
/* Assume BAD_TARGET; will be cleared later */
- cmd->result = DID_BAD_TARGET << 16;
+ set_host_byte(cmd, DID_BAD_TARGET);
/* ignore invalid targets */
if (cmd->device->id >= acb->scsi_host->max_id ||
@@ -1013,7 +997,8 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
/* set callback and clear result in the command */
cmd->scsi_done = done;
- cmd->result = 0;
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
srb = list_first_entry_or_null(&acb->srb_free_list,
struct ScsiReqBlk, list);
@@ -1250,7 +1235,7 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
- cmd->result = DID_ABORT << 16;
+ set_host_byte(cmd, DID_ABORT);
return SUCCESS;
}
srb = find_cmd(cmd, &dcb->srb_going_list);
@@ -3178,6 +3163,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
scsi_sgtalbe(cmd));
status = srb->target_status;
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
if (srb->flag & AUTO_REQSENSE) {
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
pci_unmap_srb_sense(acb, srb);
@@ -3186,7 +3173,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*/
srb->flag &= ~AUTO_REQSENSE;
srb->adapter_status = 0;
- srb->target_status = CHECK_CONDITION << 1;
+ srb->target_status = SAM_STAT_CHECK_CONDITION;
if (debug_enabled(DBG_1)) {
switch (cmd->sense_buffer[2] & 0x0f) {
case NOT_READY:
@@ -3233,22 +3220,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*((unsigned int *)(cmd->sense_buffer + 3)));
}
- if (status == (CHECK_CONDITION << 1)) {
- cmd->result = DID_BAD_TARGET << 16;
+ if (status == SAM_STAT_CHECK_CONDITION) {
+ set_host_byte(cmd, DID_BAD_TARGET);
goto ckc_e;
}
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
- if (srb->total_xfer_length
- && srb->total_xfer_length >= cmd->underflow)
- cmd->result =
- MK_RES_LNX(DRIVER_SENSE, DID_OK,
- srb->end_message, CHECK_CONDITION);
- /*SET_RES_DID(cmd->result,DID_OK) */
- else
- cmd->result =
- MK_RES_LNX(DRIVER_SENSE, DID_OK,
- srb->end_message, CHECK_CONDITION);
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
goto ckc_e;
}
@@ -3258,10 +3236,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/*
* target status..........................
*/
- if (status >> 1 == CHECK_CONDITION) {
+ if (status == SAM_STAT_CHECK_CONDITION) {
request_sense(acb, dcb, srb);
return;
- } else if (status >> 1 == QUEUE_FULL) {
+ } else if (status == SAM_STAT_TASK_SET_FULL) {
tempcnt = (u8)list_size(&dcb->srb_going_list);
dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
dcb->target_id, dcb->target_lun, tempcnt);
@@ -3277,13 +3255,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
} else if (status == SCSI_STAT_SEL_TIMEOUT) {
srb->adapter_status = H_SEL_TIMEOUT;
srb->target_status = 0;
- cmd->result = DID_NO_CONNECT << 16;
+ set_host_byte(cmd, DID_NO_CONNECT);
} else {
srb->adapter_status = 0;
- SET_RES_DID(cmd->result, DID_ERROR);
- SET_RES_MSG(cmd->result, srb->end_message);
- SET_RES_TARGET(cmd->result, status);
-
+ set_host_byte(cmd, DID_ERROR);
+ set_status_byte(cmd, status);
}
} else {
/*
@@ -3292,16 +3268,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
status = srb->adapter_status;
if (status & H_OVER_UNDER_RUN) {
srb->target_status = 0;
- SET_RES_DID(cmd->result, DID_OK);
- SET_RES_MSG(cmd->result, srb->end_message);
+ scsi_msg_to_host_byte(cmd, srb->end_message);
} else if (srb->status & PARITY_ERROR) {
- SET_RES_DID(cmd->result, DID_PARITY);
- SET_RES_MSG(cmd->result, srb->end_message);
+ set_host_byte(cmd, DID_PARITY);
} else { /* No error */
srb->adapter_status = 0;
srb->target_status = 0;
- SET_RES_DID(cmd->result, DID_OK);
}
}
@@ -3322,15 +3295,15 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
ptr = (struct ScsiInqData *)(base + offset);
- if (!ckc_only && (cmd->result & RES_DID) == 0
+ if (!ckc_only && get_host_byte(cmd) == DID_OK
&& cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
&& dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;
/*if( srb->cmd->cmnd[0] == INQUIRY && */
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
- if ((cmd->result == (DID_OK << 16) ||
- status_byte(cmd->result) == CHECK_CONDITION)) {
+ if ((get_host_byte(cmd) == DID_OK) ||
+ (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) {
if (!dcb->init_tcq_flag) {
add_dev(acb, dcb, ptr);
dcb->init_tcq_flag = 1;
@@ -3357,7 +3330,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (srb != acb->tmp_srb) {
/* Add to free list */
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
- cmd, cmd->result);
+ cmd, cmd->result);
list_move_tail(&srb->list, &acb->srb_free_list);
} else {
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
@@ -3381,16 +3354,14 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
struct scsi_cmnd *p;
list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
- int result;
-
p = srb->cmd;
- result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
list_del(&srb->list);
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
- p->result = result;
+ set_host_byte(p, did_flag);
+ set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {
@@ -3411,14 +3382,13 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
/* Waiting queue */
list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
- int result;
p = srb->cmd;
- result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun);
list_move_tail(&srb->list, &acb->srb_free_list);
- p->result = result;
+ set_host_byte(p, did_flag);
+ set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index efa8c0381476..37d06f993b76 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -88,6 +88,7 @@ struct alua_dh_data {
struct scsi_device *sdev;
int init_error;
struct mutex init_mutex;
+ bool disabled;
};
struct alua_queue_data {
@@ -517,7 +518,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
int len, k, off, bufflen = ALUA_RTPG_SIZE;
int group_id_old, state_old, pref_old, valid_states_old;
unsigned char *desc, *buff;
- unsigned err, retval;
+ unsigned err;
+ int retval;
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
@@ -562,13 +564,15 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
kfree(buff);
return SCSI_DH_OK;
}
- if (!scsi_sense_valid(&sense_hdr)) {
+ if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
ALUA_DH_NAME, retval);
kfree(buff);
- if (driver_byte(retval) == DRIVER_ERROR)
+ if (retval < 0)
return SCSI_DH_DEV_TEMP_BUSY;
+ if (host_byte(retval) == DID_NO_CONNECT)
+ return SCSI_DH_RES_TEMP_UNAVAIL;
return SCSI_DH_IO;
}
@@ -791,11 +795,11 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
if (retval) {
- if (!scsi_sense_valid(&sense_hdr)) {
+ if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: stpg failed, result %d",
ALUA_DH_NAME, retval);
- if (driver_byte(retval) == DRIVER_ERROR)
+ if (retval < 0)
return SCSI_DH_DEV_TEMP_BUSY;
} else {
sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
@@ -807,6 +811,51 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
return SCSI_DH_RETRY;
}
+static bool alua_rtpg_select_sdev(struct alua_port_group *pg)
+{
+ struct alua_dh_data *h;
+ struct scsi_device *sdev = NULL;
+
+ lockdep_assert_held(&pg->lock);
+ if (WARN_ON(!pg->rtpg_sdev))
+ return false;
+
+ /*
+ * RCU protection isn't necessary for dh_list here
+ * as we hold pg->lock, but for access to h->pg.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &pg->dh_list, node) {
+ if (!h->sdev)
+ continue;
+ if (h->sdev == pg->rtpg_sdev) {
+ h->disabled = true;
+ continue;
+ }
+ if (rcu_dereference(h->pg) == pg &&
+ !h->disabled &&
+ !scsi_device_get(h->sdev)) {
+ sdev = h->sdev;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sdev) {
+ pr_warn("%s: no device found for rtpg\n",
+ (pg->device_id_len ?
+ (char *)pg->device_id_str : "(nameless PG)"));
+ return false;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n");
+
+ scsi_device_put(pg->rtpg_sdev);
+ pg->rtpg_sdev = sdev;
+
+ return true;
+}
+
static void alua_rtpg_work(struct work_struct *work)
{
struct alua_port_group *pg =
@@ -815,6 +864,7 @@ static void alua_rtpg_work(struct work_struct *work)
LIST_HEAD(qdata_list);
int err = SCSI_DH_OK;
struct alua_queue_data *qdata, *tmp;
+ struct alua_dh_data *h;
unsigned long flags;
spin_lock_irqsave(&pg->lock, flags);
@@ -848,9 +898,18 @@ static void alua_rtpg_work(struct work_struct *work)
}
err = alua_rtpg(sdev, pg);
spin_lock_irqsave(&pg->lock, flags);
- if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+
+ /* If RTPG failed on the current device, try using another */
+ if (err == SCSI_DH_RES_TEMP_UNAVAIL &&
+ alua_rtpg_select_sdev(pg))
+ err = SCSI_DH_IMM_RETRY;
+
+ if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY ||
+ pg->flags & ALUA_PG_RUN_RTPG) {
pg->flags &= ~ALUA_PG_RUNNING;
- if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+ if (err == SCSI_DH_IMM_RETRY)
+ pg->interval = 0;
+ else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
pg->interval = ALUA_RTPG_RETRY_DELAY;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
@@ -878,6 +937,12 @@ static void alua_rtpg_work(struct work_struct *work)
}
list_splice_init(&pg->rtpg_list, &qdata_list);
+ /*
+ * We went through an RTPG, for good or bad.
+ * Re-enable all devices for the next attempt.
+ */
+ list_for_each_entry(h, &pg->dh_list, node)
+ h->disabled = false;
pg->rtpg_sdev = NULL;
spin_unlock_irqrestore(&pg->lock, flags);
@@ -962,6 +1027,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
int err = SCSI_DH_DEV_UNSUPP, tpgs;
mutex_lock(&h->init_mutex);
+ h->disabled = false;
tpgs = alua_check_tpgs(sdev);
if (tpgs != TPGS_MODE_NONE)
err = alua_check_vpd(sdev, h, tpgs);
@@ -1080,7 +1146,6 @@ static void alua_check(struct scsi_device *sdev, bool force)
return;
}
rcu_read_unlock();
-
alua_rtpg_queue(pg, sdev, NULL, force);
kref_put(&pg->kref, release_port_group);
}
diff --git a/drivers/scsi/elx/Kconfig b/drivers/scsi/elx/Kconfig
new file mode 100644
index 000000000000..831daea7a951
--- /dev/null
+++ b/drivers/scsi/elx/Kconfig
@@ -0,0 +1,9 @@
+config SCSI_EFCT
+ tristate "Emulex Fibre Channel Target"
+ depends on PCI && SCSI
+ depends on TARGET_CORE
+ depends on SCSI_FC_ATTRS
+ select CRC_T10DIF
+ help
+ The efct driver provides enhanced SCSI Target Mode
+ support for specific SLI-4 adapters.
diff --git a/drivers/scsi/elx/Makefile b/drivers/scsi/elx/Makefile
new file mode 100644
index 000000000000..a8537d7a2a6e
--- /dev/null
+++ b/drivers/scsi/elx/Makefile
@@ -0,0 +1,18 @@
+#// SPDX-License-Identifier: GPL-2.0
+#/*
+# * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+# */
+
+
+obj-$(CONFIG_SCSI_EFCT) := efct.o
+
+efct-objs := efct/efct_driver.o efct/efct_io.o efct/efct_scsi.o \
+ efct/efct_xport.o efct/efct_hw.o efct/efct_hw_queues.o \
+ efct/efct_lio.o efct/efct_unsol.o
+
+efct-objs += libefc/efc_cmds.o libefc/efc_domain.o libefc/efc_fabric.o \
+ libefc/efc_node.o libefc/efc_nport.o libefc/efc_device.o \
+ libefc/efclib.o libefc/efc_sm.o libefc/efc_els.o
+
+efct-objs += libefc_sli/sli4.o
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
new file mode 100644
index 000000000000..eab68fd9337a
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+
+#include "efct_hw.h"
+#include "efct_unsol.h"
+#include "efct_scsi.h"
+
+LIST_HEAD(efct_devices);
+
+static int logmask;
+module_param(logmask, int, 0444);
+MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");
+
+static struct libefc_function_template efct_libefc_templ = {
+ .issue_mbox_rqst = efct_issue_mbox_rqst,
+ .send_els = efct_els_hw_srrs_send,
+ .send_bls = efct_efc_bls_send,
+
+ .new_nport = efct_scsi_tgt_new_nport,
+ .del_nport = efct_scsi_tgt_del_nport,
+ .scsi_new_node = efct_scsi_new_initiator,
+ .scsi_del_node = efct_scsi_del_initiator,
+ .hw_seq_free = efct_efc_hw_sequence_free,
+};
+
+static int
+efct_device_init(void)
+{
+ int rc;
+
+ /* driver-wide init for target-server */
+ rc = efct_scsi_tgt_driver_init();
+ if (rc) {
+ pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = efct_scsi_reg_fc_transport();
+ if (rc) {
+ pr_err("failed to register to FC host\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void
+efct_device_shutdown(void)
+{
+ efct_scsi_release_fc_transport();
+
+ efct_scsi_tgt_driver_exit();
+}
+
+static void *
+efct_device_alloc(u32 nid)
+{
+ struct efct *efct = NULL;
+
+ efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
+ if (!efct)
+ return efct;
+
+ INIT_LIST_HEAD(&efct->list_entry);
+ list_add_tail(&efct->list_entry, &efct_devices);
+
+ return efct;
+}
+
+static void
+efct_teardown_msix(struct efct *efct)
+{
+ u32 i;
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+ }
+
+ pci_free_irq_vectors(efct->pci);
+}
+
+static int
+efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
+{
+ struct efc *efc;
+ struct sli4 *sli;
+ int rc = 0;
+
+ efc = kzalloc(sizeof(*efc), GFP_KERNEL);
+ if (!efc)
+ return -ENOMEM;
+
+ efct->efcport = efc;
+
+ memcpy(&efc->tt, tt, sizeof(*tt));
+ efc->base = efct;
+ efc->pci = efct->pci;
+
+ efc->def_wwnn = efct_get_wwnn(&efct->hw);
+ efc->def_wwpn = efct_get_wwpn(&efct->hw);
+ efc->enable_tgt = 1;
+ efc->log_level = EFC_LOG_LIB;
+
+ sli = &efct->hw.sli;
+ efc->max_xfer_size = sli->sge_supported_length *
+ sli_get_max_sgl(&efct->hw.sli);
+ efc->sli = sli;
+ efc->fcfi = efct->hw.fcf_indicator;
+
+ rc = efcport_init(efc);
+ if (rc)
+ efc_log_err(efc, "efcport_init failed\n");
+
+ return rc;
+}
+
+static int efct_request_firmware_update(struct efct *efct);
+
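+/*
+ * Map the PCI device ID to the adapter model name; the model name is also
+ * used to build the firmware image file name ("<model>.grp").
+ */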
+static const char *
+efct_pci_model(u16 device)
+{
+ switch (device) {
+ case EFCT_DEVICE_LANCER_G6: return "LPE31004";
+ case EFCT_DEVICE_LANCER_G7: return "LPE36000";
+ default: return "unknown";
+ }
+}
+
+static int
+efct_device_attach(struct efct *efct)
+{
+ u32 rc = 0, i = 0;
+
+ if (efct->attached) {
+ efc_log_err(efct, "Device is already attached\n");
+ return -EIO;
+ }
+
+ snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
+ efct->instance_index);
+
+ efct->logmask = logmask;
+ efct->filter_def = EFCT_DEFAULT_FILTER;
+ efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;
+
+ efct->model = efct_pci_model(efct->pci->device);
+
+ efct->efct_req_fw_upgrade = true;
+
+ /* Allocate transport object and bring online */
+ efct->xport = efct_xport_alloc(efct);
+ if (!efct->xport) {
+ efc_log_err(efct, "failed to allocate transport object\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = efct_xport_attach(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to attach transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_xport_initialize(efct->xport);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize transport object\n");
+ goto xport_out;
+ }
+
+ rc = efct_efclib_config(efct, &efct_libefc_templ);
+ if (rc) {
+ efc_log_err(efct, "failed to init efclib\n");
+ goto efclib_out;
+ }
+
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d enabled\n", i);
+ enable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ efct->attached = true;
+
+ if (efct->efct_req_fw_upgrade)
+ efct_request_firmware_update(efct);
+
+ return rc;
+
+efclib_out:
+ efct_xport_detach(efct->xport);
+xport_out:
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+out:
+ return rc;
+}
+
+static int
+efct_device_detach(struct efct *efct)
+{
+ int i;
+
+ if (!efct || !efct->attached) {
+ pr_err("Device is not attached\n");
+ return -EIO;
+ }
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
+ efc_log_err(efct, "Transport Shutdown timed out\n");
+
+ for (i = 0; i < efct->n_msix_vec; i++)
+ disable_irq(pci_irq_vector(efct->pci, i));
+
+ efct_xport_detach(efct->xport);
+
+ efct_xport_free(efct->xport);
+ efct->xport = NULL;
+
+ efcport_destroy(efct->efcport);
+ kfree(efct->efcport);
+
+ efct->attached = false;
+
+ return 0;
+}
+
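+/* Firmware-write completion callback: record the result and wake the waiter. */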
+static void
+efct_fw_write_cb(int status, u32 actual_write_length,
+ u32 change_status, void *arg)
+{
+ struct efct_fw_write_result *result = arg;
+
+ result->status = status;
+ result->actual_xfer = actual_write_length;
+ result->change_status = change_status;
+
+ complete(&result->done);
+}
+
+static int
+efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
+ u8 *change_status)
+{
+ int rc = 0;
+ u32 bytes_left;
+ u32 xfer_size;
+ u32 offset;
+ struct efc_dma dma;
+ int last = 0;
+ struct efct_fw_write_result result;
+
+ init_completion(&result.done);
+
+ bytes_left = buf_len;
+ offset = 0;
+
+ dma.size = FW_WRITE_BUFSIZE;
+ dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ dma.size, &dma.phys, GFP_DMA);
+ if (!dma.virt)
+ return -ENOMEM;
+
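+	/*
+	 * Write the image in FW_WRITE_BUFSIZE chunks, waiting for each chunk
+	 * to complete; the last chunk carries the firmware change status.
+	 */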
+ while (bytes_left > 0) {
+ if (bytes_left > FW_WRITE_BUFSIZE)
+ xfer_size = FW_WRITE_BUFSIZE;
+ else
+ xfer_size = bytes_left;
+
+ memcpy(dma.virt, buf + offset, xfer_size);
+
+ if (bytes_left == xfer_size)
+ last = 1;
+
+ efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
+ last, efct_fw_write_cb, &result);
+
+ if (wait_for_completion_interruptible(&result.done) != 0) {
+ rc = -ENXIO;
+ break;
+ }
+
+ if (result.actual_xfer == 0 || result.status != 0) {
+ rc = -EFAULT;
+ break;
+ }
+
+ if (last)
+ *change_status = result.change_status;
+
+ bytes_left -= result.actual_xfer;
+ offset += result.actual_xfer;
+ }
+
+ dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
+ return rc;
+}
+
+static int
+efct_fw_reset(struct efct *efct)
+{
+ /*
+ * Firmware reset to activate the new firmware.
+ * Function 0 will update and load the new firmware
+ * during attach.
+ */
+ if (timer_pending(&efct->xport->stats_timer))
+ del_timer(&efct->xport->stats_timer);
+
+ if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
+ efc_log_info(efct, "failed to reset firmware\n");
+ return -EIO;
+ }
+
+	efc_log_info(efct, "successfully reset firmware. Now resetting port\n");
+
+ efct_device_detach(efct);
+ return efct_device_attach(efct);
+}
+
+static int
+efct_request_firmware_update(struct efct *efct)
+{
+ int rc = 0;
+ u8 file_name[256], fw_change_status = 0;
+ const struct firmware *fw;
+ struct efct_hw_grp_hdr *fw_image;
+
+ snprintf(file_name, 256, "%s.grp", efct->model);
+
+ rc = request_firmware(&fw, file_name, &efct->pci->dev);
+ if (rc) {
+ efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
+ return rc;
+ }
+
+ fw_image = (struct efct_hw_grp_hdr *)fw->data;
+
+ if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
+ strnlen(fw_image->revision, 16))) {
+ efc_log_debug(efct,
+ "Skip update. Firmware is already up to date.\n");
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
+ efct->hw.sli.fw_name[0], fw_image->revision);
+
+ rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
+ if (rc) {
+ efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
+ goto exit;
+ }
+
+ efc_log_info(efct, "Firmware updated successfully\n");
+ switch (fw_change_status) {
+ case 0x00:
+ efc_log_info(efct, "New firmware is active.\n");
+ break;
+ case 0x01:
+ efc_log_info(efct,
+ "System reboot needed to activate the new firmware\n");
+ break;
+ case 0x02:
+ case 0x03:
+ efc_log_info(efct,
+ "firmware reset to activate the new firmware\n");
+ efct_fw_reset(efct);
+ break;
+ default:
+ efc_log_info(efct, "Unexpected value change_status:%d\n",
+ fw_change_status);
+ break;
+ }
+
+exit:
+ release_firmware(fw);
+
+ return rc;
+}
+
+static void
+efct_device_free(struct efct *efct)
+{
+ if (efct) {
+ list_del(&efct->list_entry);
+ kfree(efct);
+ }
+}
+
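+/*
+ * Run HW setup so the SLI configuration is known, then return the number of
+ * EQs, which is also the number of MSI-X vectors to request.
+ */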
+static int
+efct_device_interrupts_required(struct efct *efct)
+{
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc < 0)
+ return rc;
+
+ return efct->hw.config.n_eq;
+}
+
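+/* Threaded IRQ handler: process the EQ associated with this vector. */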
+static irqreturn_t
+efct_intr_thread(int irq, void *handle)
+{
+ struct efct_intr_context *intr_ctx = handle;
+ struct efct *efct = intr_ctx->efct;
+
+ efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
+ return IRQ_HANDLED;
+}
+
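+/* Hard IRQ handler: wake the IRQ thread above to do the EQ processing. */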
+static irqreturn_t
+efct_intr_msix(int irq, void *handle)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static int
+efct_setup_msix(struct efct *efct, u32 num_intrs)
+{
+ int rc = 0, i;
+
+ if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
+ dev_err(&efct->pci->dev,
+ "%s : MSI-X not available\n", __func__);
+ return -EIO;
+ }
+
+ efct->n_msix_vec = num_intrs;
+
+ rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+
+ if (rc < 0) {
+ dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < num_intrs; i++) {
+ struct efct_intr_context *intr_ctx = NULL;
+
+ intr_ctx = &efct->intr_context[i];
+ intr_ctx->efct = efct;
+ intr_ctx->index = i;
+
+ rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
+ efct_intr_msix, efct_intr_thread, 0,
+ EFCT_DRIVER_NAME, intr_ctx);
+ if (rc) {
+ dev_err(&efct->pci->dev,
+ "Failed to register %d vector: %d\n", i, rc);
+ goto out;
+ }
+ }
+
+ return rc;
+
+out:
+ while (--i >= 0)
+ free_irq(pci_irq_vector(efct->pci, i),
+ &efct->intr_context[i]);
+
+ pci_free_irq_vectors(efct->pci);
+ return rc;
+}
+
+static struct pci_device_id efct_pci_table[] = {
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
+ {PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
+ {} /* terminate list */
+};
+
+static int
+efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct efct *efct = NULL;
+ int rc;
+ u32 i, r;
+ int num_interrupts = 0;
+ int nid;
+
+ dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc)
+ return rc;
+
+ pci_set_master(pdev);
+
+ rc = pci_set_mwi(pdev);
+ if (rc) {
+ dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
+ goto mwi_out;
+ }
+
+ rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
+ if (rc) {
+ dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
+ goto req_regions_out;
+ }
+
+	/* Fetch the NUMA node id for this device */
+ nid = dev_to_node(&pdev->dev);
+ if (nid < 0) {
+ dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
+ nid = 0;
+ }
+
+ /* Allocate efct */
+ efct = efct_device_alloc(nid);
+ if (!efct) {
+ dev_err(&pdev->dev, "Failed to allocate efct\n");
+ rc = -ENOMEM;
+ goto alloc_out;
+ }
+
+ efct->pci = pdev;
+ efct->numa_node = nid;
+
+ /* Map all memory BARs */
+ for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ efct->reg[r] = ioremap(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ r++;
+ }
+
+ /*
+ * If the 64-bit attribute is set, both this BAR and the
+ * next form the complete address. Skip processing the
+ * next BAR.
+ */
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
+ i++;
+ }
+
+ pci_set_drvdata(pdev, efct);
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
+ rc = -1;
+ goto dma_mask_out;
+ }
+ }
+
+ num_interrupts = efct_device_interrupts_required(efct);
+ if (num_interrupts < 0) {
+ efc_log_err(efct, "efct_device_interrupts_required failed\n");
+ rc = -1;
+ goto dma_mask_out;
+ }
+
+ /*
+	 * Initialize MSI-X interrupts; note that efct_setup_msix()
+	 * enables the interrupts
+ */
+ rc = efct_setup_msix(efct, num_interrupts);
+ if (rc) {
+ dev_err(&pdev->dev, "Can't setup msix\n");
+ goto dma_mask_out;
+ }
+ /* Disable interrupt for now */
+ for (i = 0; i < efct->n_msix_vec; i++) {
+ efc_log_debug(efct, "irq %d disabled\n", i);
+ disable_irq(pci_irq_vector(efct->pci, i));
+ }
+
+ rc = efct_device_attach(efct);
+ if (rc)
+ goto attach_out;
+
+ return 0;
+
+attach_out:
+ efct_teardown_msix(efct);
+dma_mask_out:
+ pci_set_drvdata(pdev, NULL);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+ efct_device_free(efct);
+alloc_out:
+ pci_release_regions(pdev);
+req_regions_out:
+ pci_clear_mwi(pdev);
+mwi_out:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void
+efct_pci_remove(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ u32 i;
+
+ if (!efct)
+ return;
+
+ efct_device_detach(efct);
+
+ efct_teardown_msix(efct);
+
+ for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
+ if (efct->reg[i])
+ iounmap(efct->reg[i]);
+ }
+
+ pci_set_drvdata(pdev, NULL);
+
+ efct_device_free(efct);
+
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
+{
+ if (efct) {
+ efc_log_debug(efct,
+ "PCI channel disable preparing for reset\n");
+ efct_device_detach(efct);
+ /* Disable interrupt and pci device */
+ efct_teardown_msix(efct);
+ }
+ pci_disable_device(pdev);
+}
+
+static void
+efct_device_prep_for_recover(struct efct *efct)
+{
+ if (efct) {
+ efc_log_debug(efct, "PCI channel preparing for recovery\n");
+ efct_hw_io_abort_all(&efct->hw);
+ }
+}
+
+/**
+ * efct_pci_io_error_detected - method for handling PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When invoked, it dispatches the error
+ * handling routine appropriate to the reported PCI channel state.
+ *
+ * Return codes
+ * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+ pci_ers_result_t rc;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ efct_device_prep_for_recover(efct);
+ rc = PCI_ERS_RESULT_CAN_RECOVER;
+ break;
+ case pci_channel_io_frozen:
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ case pci_channel_io_perm_failure:
+ efct_device_detach(efct);
+ rc = PCI_ERS_RESULT_DISCONNECT;
+ break;
+ default:
+ efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
+ efct_device_prep_for_reset(efct, pdev);
+ rc = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ }
+
+ return rc;
+}
+
+static pci_ers_result_t
+efct_pci_io_slot_reset(struct pci_dev *pdev)
+{
+ int rc;
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ efc_log_err(efct, "failed to enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /*
+	 * Because pci_restore_state() clears the device's saved_state flag,
+	 * the restored state must be saved again.
+ */
+
+ pci_save_state(pdev);
+
+ pci_set_master(pdev);
+
+ rc = efct_setup_msix(efct, efct->n_msix_vec);
+ if (rc)
+ efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
+ rc);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+	/* Bring the device back online */
+ efct_device_attach(efct);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void
+efct_pci_io_resume(struct pci_dev *pdev)
+{
+ struct efct *efct = pci_get_drvdata(pdev);
+
+ /* Perform device reset */
+ efct_device_detach(efct);
+	/* Bring the device back online */
+ efct_device_attach(efct);
+}
+
+MODULE_DEVICE_TABLE(pci, efct_pci_table);
+
+static struct pci_error_handlers efct_pci_err_handler = {
+ .error_detected = efct_pci_io_error_detected,
+ .slot_reset = efct_pci_io_slot_reset,
+ .resume = efct_pci_io_resume,
+};
+
+static struct pci_driver efct_pci_driver = {
+ .name = EFCT_DRIVER_NAME,
+ .id_table = efct_pci_table,
+ .probe = efct_pci_probe,
+ .remove = efct_pci_remove,
+ .err_handler = &efct_pci_err_handler,
+};
+
+static
+int __init efct_init(void)
+{
+ int rc;
+
+ rc = efct_device_init();
+ if (rc) {
+ pr_err("efct_device_init failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = pci_register_driver(&efct_pci_driver);
+ if (rc) {
+ pr_err("pci_register_driver failed rc=%d\n", rc);
+ efct_device_shutdown();
+ }
+
+ return rc;
+}
+
+static void __exit efct_exit(void)
+{
+ pci_unregister_driver(&efct_pci_driver);
+ efct_device_shutdown();
+}
+
+module_init(efct_init);
+module_exit(efct_exit);
+MODULE_VERSION(EFCT_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
diff --git a/drivers/scsi/elx/efct/efct_driver.h b/drivers/scsi/elx/efct/efct_driver.h
new file mode 100644
index 000000000000..dab8eac4f243
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_driver.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_DRIVER_H__)
+#define __EFCT_DRIVER_H__
+
+/***************************************************************************
+ * OS specific includes
+ */
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include "../include/efc_common.h"
+#include "../libefc/efclib.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+#include "efct_xport.h"
+
+#define EFCT_DRIVER_NAME "efct"
+#define EFCT_DRIVER_VERSION "1.0.0.0"
+
+/* EFCT_DEFAULT_FILTER -
+ * MRQ filter to segregate the IO flow.
+ */
+#define EFCT_DEFAULT_FILTER "0x01ff22ff,0,0,0"
+
+/* EFCT_OS_MAX_ISR_TIME_MSEC -
+ * maximum time driver code should spend in an interrupt
+ * or kernel thread context without yielding
+ */
+#define EFCT_OS_MAX_ISR_TIME_MSEC 1000
+
+#define EFCT_FC_MAX_SGL 64
+#define EFCT_FC_DIF_SEED 0
+
+/* Watermark */
+#define EFCT_WATERMARK_HIGH_PCT 90
+#define EFCT_WATERMARK_LOW_PCT 80
+#define EFCT_IO_WATERMARK_PER_INITIATOR 8
+
+#define EFCT_PCI_MAX_REGS 6
+#define MAX_PCI_INTERRUPTS 16
+
+struct efct_intr_context {
+ struct efct *efct;
+ u32 index;
+};
+
+struct efct {
+ struct pci_dev *pci;
+ void __iomem *reg[EFCT_PCI_MAX_REGS];
+
+ u32 n_msix_vec;
+ bool attached;
+ bool soft_wwn_enable;
+ u8 efct_req_fw_upgrade;
+ struct efct_intr_context intr_context[MAX_PCI_INTERRUPTS];
+ u32 numa_node;
+
+ char name[EFC_NAME_LENGTH];
+ u32 instance_index;
+ struct list_head list_entry;
+ struct efct_scsi_tgt tgt_efct;
+ struct efct_xport *xport;
+ struct efc *efcport;
+ struct Scsi_Host *shost;
+ int logmask;
+ u32 max_isr_time_msec;
+
+ const char *desc;
+
+ const char *model;
+
+ struct efct_hw hw;
+
+ u32 rq_selection_policy;
+ char *filter_def;
+ int topology;
+
+ /* Look up for target node */
+ struct xarray lookup;
+
+ /*
+ * Target IO timer value:
+ * Zero: target command timeout disabled.
+ * Non-zero: Timeout value, in seconds, for target commands
+ */
+ u32 target_io_timer_sec;
+
+ int speed;
+ struct dentry *sess_debugfs_dir;
+};
+
+#define FW_WRITE_BUFSIZE (64 * 1024)
+
+struct efct_fw_write_result {
+ struct completion done;
+ int status;
+ u32 actual_xfer;
+ u32 change_status;
+};
+
+extern struct list_head efct_devices;
+
+#endif /* __EFCT_DRIVER_H__ */
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
new file mode 100644
index 000000000000..ba8256b4c782
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -0,0 +1,3581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+struct efct_hw_link_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_host_stat_cb_arg {
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg);
+ void *arg;
+};
+
+struct efct_hw_fw_wr_cb_arg {
+ void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
+ void *arg;
+};
+
+struct efct_mbox_rqst_ctx {
+ int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
+ void *arg;
+};
+
+static int
+efct_hw_link_event_init(struct efct_hw *hw)
+{
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ hw->link.topology = SLI4_LINK_TOPO_NONE;
+ hw->link.medium = SLI4_LINK_MEDIUM_MAX;
+ hw->link.speed = 0;
+ hw->link.loop_map = NULL;
+ hw->link.fc_id = U32_MAX;
+
+ return 0;
+}
+
+static int
+efct_hw_read_max_dump_size(struct efct_hw *hw)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ struct efct *efct = hw->os;
+ int rc = 0;
+ struct sli4_rsp_cmn_set_dump_location *rsp;
+
+	/* attempt to determine the dump size for function 0 only. */
+ if (PCI_FUNC(efct->pci->devfn) != 0)
+ return rc;
+
+ if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
+ return -EIO;
+
+ rsp = (struct sli4_rsp_cmn_set_dump_location *)
+ (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc != 0) {
+ efc_log_debug(hw->os, "set dump location cmd failed\n");
+ return rc;
+ }
+
+ hw->dump_size =
+ le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
+
+ efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
+
+ return rc;
+}
+
+static int
+__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_topology *read_topo =
+ (struct sli4_cmd_read_topology *)mqe;
+ u8 speed;
+ struct efc_domain_record drec = {0};
+ struct efct *efct = hw->os;
+
+ if (status || le16_to_cpu(read_topo->hdr.status)) {
+ efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(read_topo->hdr.status));
+ return -EIO;
+ }
+
+ switch (le32_to_cpu(read_topo->dw2_attentype) &
+ SLI4_READTOPO_ATTEN_TYPE) {
+ case SLI4_READ_TOPOLOGY_LINK_UP:
+ hw->link.status = SLI4_LINK_STATUS_UP;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_DOWN:
+ hw->link.status = SLI4_LINK_STATUS_DOWN;
+ break;
+ case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
+ hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ hw->link.status = SLI4_LINK_STATUS_MAX;
+ break;
+ }
+
+ switch (read_topo->topology) {
+ case SLI4_READ_TOPO_NON_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
+ break;
+ case SLI4_READ_TOPO_FC_AL:
+ hw->link.topology = SLI4_LINK_TOPO_FC_AL;
+ if (hw->link.status == SLI4_LINK_STATUS_UP)
+ hw->link.loop_map = hw->loop_map.virt;
+ hw->link.fc_id = read_topo->acquired_al_pa;
+ break;
+ default:
+ hw->link.topology = SLI4_LINK_TOPO_MAX;
+ break;
+ }
+
+ hw->link.medium = SLI4_LINK_MEDIUM_FC;
+
+ speed = (le32_to_cpu(read_topo->currlink_state) &
+ SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
+ switch (speed) {
+ case SLI4_READ_TOPOLOGY_SPEED_1G:
+ hw->link.speed = 1 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_2G:
+ hw->link.speed = 2 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_4G:
+ hw->link.speed = 4 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_8G:
+ hw->link.speed = 8 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_16G:
+ hw->link.speed = 16 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_32G:
+ hw->link.speed = 32 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_64G:
+ hw->link.speed = 64 * 1000;
+ break;
+ case SLI4_READ_TOPOLOGY_SPEED_128G:
+ hw->link.speed = 128 * 1000;
+ break;
+ }
+
+ drec.speed = hw->link.speed;
+ drec.fc_id = hw->link.fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link(void *ctx, void *e)
+{
+ struct efct_hw *hw = ctx;
+ struct sli4_link_event *event = e;
+ struct efc_domain *d = NULL;
+ int rc = 0;
+ struct efct *efct = hw->os;
+
+ efct_hw_link_event_init(hw);
+
+ switch (event->status) {
+ case SLI4_LINK_STATUS_UP:
+
+ hw->link = *event;
+ efct->efcport->link_status = EFC_LINK_STATUS_UP;
+
+ if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
+ struct efc_domain_record drec = {0};
+
+ efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
+ event->speed);
+ drec.speed = event->speed;
+ drec.fc_id = event->fc_id;
+ drec.is_nport = true;
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
+ &drec);
+ } else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
+ event->speed);
+
+ if (!sli_cmd_read_topology(&hw->sli, buf,
+ &hw->loop_map)) {
+ rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
+ __efct_read_topology_cb, NULL);
+ }
+
+ if (rc)
+ efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
+ } else {
+ efc_log_info(hw->os, "%s(%#x), speed is %d\n",
+ "Link Up, unsupported topology ",
+ event->topology, event->speed);
+ }
+ break;
+ case SLI4_LINK_STATUS_DOWN:
+ efc_log_info(hw->os, "Link down\n");
+
+ hw->link.status = event->status;
+ efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
+
+ d = efct->efcport->domain;
+ if (d)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
+ break;
+ default:
+ efc_log_debug(hw->os, "unhandled link status %#x\n",
+ event->status);
+ break;
+ }
+
+ return 0;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
+{
+ u32 i, max_sgl, cpus;
+
+ if (hw->hw_setup_called)
+ return 0;
+
+ /*
+ * efct_hw_init() relies on NULL pointers indicating that a structure
+ * needs allocation. If a structure is non-NULL, efct_hw_init() won't
+ * free/realloc that memory
+ */
+ memset(hw, 0, sizeof(struct efct_hw));
+
+ hw->hw_setup_called = true;
+
+ hw->os = os;
+
+ mutex_init(&hw->bmbx_lock);
+ spin_lock_init(&hw->cmd_lock);
+ INIT_LIST_HEAD(&hw->cmd_head);
+ INIT_LIST_HEAD(&hw->cmd_pending);
+ hw->cmd_head_count = 0;
+
+ /* Create mailbox command ctx pool */
+ hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_command_ctx));
+ if (!hw->cmd_ctx_pool) {
+ efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
+ return -EIO;
+ }
+
+ /* Create mailbox request ctx pool for library callback */
+ hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
+ sizeof(struct efct_mbox_rqst_ctx));
+ if (!hw->mbox_rqst_pool) {
+ efc_log_err(hw->os, "failed to allocate mbox request pool\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&hw->io_lock);
+ INIT_LIST_HEAD(&hw->io_inuse);
+ INIT_LIST_HEAD(&hw->io_free);
+ INIT_LIST_HEAD(&hw->io_wait_free);
+
+ atomic_set(&hw->io_alloc_failed_count, 0);
+
+ hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
+ if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
+ efc_log_err(hw->os, "SLI setup failed\n");
+ return -EIO;
+ }
+
+ efct_hw_link_event_init(hw);
+
+ sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
+
+ /*
+ * Set all the queue sizes to the maximum allowed.
+ */
+ for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
+ hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
+ /*
+ * Adjust the size of the WQs so that the CQ is twice as big as
+ * the WQ to allow for 2 completions per IO. This allows us to
+ * handle multi-phase as well as aborts.
+ */
+ hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
+
+ /*
+ * The RQ assignment for RQ pair mode.
+ */
+
+ hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
+ hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
+
+ cpus = num_possible_cpus();
+ hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
+
+ max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
+ max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
+ hw->config.n_sgl = max_sgl;
+
+ (void)efct_hw_read_max_dump_size(hw);
+
+ return 0;
+}
+
+static void
+efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
+{
+ efc_log_info(hw->os,
+ "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
+ j, hw->config.filter_def[j], i, id);
+}
+
+static inline void
+efct_hw_init_free_io(struct efct_hw_io *io)
+{
+ /*
+ * Set io->done to NULL, to avoid any callbacks, should
+ * a completion be received for one of these IOs
+ */
+ io->done = NULL;
+ io->abort_done = NULL;
+ io->status_saved = false;
+ io->abort_in_progress = false;
+ io->type = 0xFFFF;
+ io->wq = NULL;
+}
+
+static bool efct_hw_iotype_is_originator(u16 io_type)
+{
+ switch (io_type) {
+ case EFCT_HW_FC_CT:
+ case EFCT_HW_ELS_REQ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
+efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* Restore the default */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+}
+
+static void
+efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+ u32 len = 0;
+ u32 ext = 0;
+
+ /* clear xbusy flag if WCQE[XB] is clear */
+ if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
+ io->xbusy = false;
+
+ /* get extended CQE status */
+ switch (io->type) {
+ case EFCT_HW_BLS_ACC:
+ case EFCT_HW_BLS_RJT:
+ break;
+ case EFCT_HW_ELS_REQ:
+ sli_fc_els_did(&hw->sli, cqe, &ext);
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_ELS_RSP:
+ case EFCT_HW_FC_CT_RSP:
+ break;
+ case EFCT_HW_FC_CT:
+ len = sli_fc_response_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_WRITE:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ len = sli_fc_io_length(&hw->sli, cqe);
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ break;
+ case EFCT_HW_IO_DNRX_REQUEUE:
+ /* release the count for re-posting the buffer */
+ /* efct_hw_io_free(hw, io); */
+ break;
+ default:
+ efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
+ io->type, io->indicator);
+ break;
+ }
+ if (status) {
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ /*
+		 * If this is an originator IO and XB is set, then issue
+		 * an abort for the IO from within the HW
+ */
+ if (efct_hw_iotype_is_originator(io->type) &&
+ wcqe->flags & SLI4_WCQE_XB) {
+ int rc;
+
+ efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
+ io->indicator, io->reqtag);
+
+ /*
+ * Because targets may send a response when the IO
+ * completes using the same XRI, we must wait for the
+ * XRI_ABORTED CQE to issue the IO callback
+ */
+ rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
+ if (rc == 0) {
+ /*
+ * latch status to return after abort is
+ * complete
+ */
+ io->status_saved = true;
+ io->saved_status = status;
+ io->saved_ext = ext;
+ io->saved_len = len;
+ goto exit_efct_hw_wq_process_io;
+ } else if (rc == -EINPROGRESS) {
+ /*
+ * Already being aborted by someone else (ABTS
+ * perhaps). Just return original
+ * error.
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x\n",
+ "abort in progress xri=",
+ io->indicator, io->reqtag);
+
+ } else {
+ /* Failed to abort for some other reason, log
+ * error
+ */
+ efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
+ "Failed to abort xri=",
+ io->indicator, io->reqtag, rc);
+ }
+ }
+ }
+
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ if (io->status_saved) {
+ /* use latched status if exists */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ }
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+ done(io, len, status, ext, io->arg);
+ }
+
+exit_efct_hw_wq_process_io:
+ return;
+}
+
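+/*
+ * Allocate the pool of HW IO objects along with their WQE buffers, XRI
+ * resources and default SGLs; on re-initialization after a reset the
+ * existing allocations are reused.
+ */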
+static int
+efct_hw_setup_io(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct efct_hw_io *io = NULL;
+ uintptr_t xfer_virt = 0;
+ uintptr_t xfer_phys = 0;
+ u32 index;
+ bool new_alloc = true;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ if (!hw->io) {
+ hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
+ if (!hw->io)
+ return -ENOMEM;
+
+ memset(hw->io, 0, hw->config.n_io * sizeof(io));
+
+ for (i = 0; i < hw->config.n_io; i++) {
+ hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!hw->io[i])
+ goto error;
+ }
+
+ /* Create WQE buffs for IO */
+ hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
+ GFP_KERNEL);
+ if (!hw->wqe_buffs) {
+ kfree(hw->io);
+ return -ENOMEM;
+ }
+
+ } else {
+ /* re-use existing IOs, including SGLs */
+ new_alloc = false;
+ }
+
+ if (new_alloc) {
+ dma = &hw->xfer_rdy;
+ dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys, GFP_DMA);
+ if (!dma->virt)
+ return -ENOMEM;
+ }
+ xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
+ xfer_phys = hw->xfer_rdy.phys;
+
+ /* Initialize the pool of HW IO objects */
+ for (i = 0; i < hw->config.n_io; i++) {
+ struct hw_wq_callback *wqcb;
+
+ io = hw->io[i];
+
+ /* initialize IO fields */
+ io->hw = hw;
+
+ /* Assign a WQE buff */
+ io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
+
+ /* Allocate the request tag for this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+ io->reqtag = wqcb->instance_index;
+
+ /* Now for the fields that are initialized on each free */
+ efct_hw_init_free_io(io);
+
+ /* The XB flag isn't cleared on IO free, so init to zero */
+ io->xbusy = 0;
+
+ if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
+ &io->indicator, &index)) {
+ efc_log_err(hw->os,
+ "sli_resource_alloc failed @ %d\n", i);
+ return -ENOMEM;
+ }
+
+ if (new_alloc) {
+ dma = &io->def_sgl;
+ dma->size = hw->config.n_sgl *
+ sizeof(struct sli4_sge);
+ dma->virt = dma_alloc_coherent(&efct->pci->dev,
+ dma->size, &dma->phys,
+ GFP_DMA);
+ if (!dma->virt) {
+ efc_log_err(hw->os, "dma_alloc fail %d\n", i);
+ memset(&io->def_sgl, 0,
+ sizeof(struct efc_dma));
+ return -ENOMEM;
+ }
+ }
+ io->def_sgl_count = hw->config.n_sgl;
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+
+ if (hw->xfer_rdy.size) {
+ io->xfer_rdy.virt = (void *)xfer_virt;
+ io->xfer_rdy.phys = xfer_phys;
+ io->xfer_rdy.size = sizeof(struct fcp_txrdy);
+
+ xfer_virt += sizeof(struct fcp_txrdy);
+ xfer_phys += sizeof(struct fcp_txrdy);
+ }
+ }
+
+ return 0;
+error:
+ for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+
+ kfree(hw->io);
+ hw->io = NULL;
+
+ return -ENOMEM;
+}
+
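+/*
+ * Pre-register the default SGLs with the port: post them in batches of up
+ * to 256 contiguous XRIs via POST_SGL_PAGES, and move the IOs to the free
+ * list as each batch succeeds.
+ */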
+static int
+efct_hw_init_prereg_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ struct efct_hw_io *io = NULL;
+ u8 cmd[SLI4_BMBX_SIZE];
+ int rc = 0;
+ u32 n_rem;
+ u32 n = 0;
+ u32 sgls_per_request = 256;
+ struct efc_dma **sgls = NULL;
+ struct efc_dma req;
+ struct efct *efct = hw->os;
+
+ sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
+ if (!sgls)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(struct efc_dma));
+ req.size = 32 + sgls_per_request * 16;
+ req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
+ GFP_DMA);
+ if (!req.virt) {
+ kfree(sgls);
+ return -ENOMEM;
+ }
+
+ for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
+ /* Copy address of SGL's into local sgls[] array, break
+ * out if the xri is not contiguous.
+ */
+ u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
+
+ for (n = 0; n < min; n++) {
+ /* Check that we have contiguous xri values */
+ if (n > 0) {
+ if (hw->io[idx + n]->indicator !=
+ hw->io[idx + n - 1]->indicator + 1)
+ break;
+ }
+
+ sgls[n] = hw->io[idx + n]->sgl;
+ }
+
+ if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
+ hw->io[idx]->indicator, n, sgls, NULL, &req)) {
+ rc = -EIO;
+ break;
+ }
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
+ if (rc) {
+ efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
+ break;
+ }
+
+ /* Add to tail if successful */
+ for (i = 0; i < n; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+ }
+
+ dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
+ memset(&req, 0, sizeof(struct efc_dma));
+ kfree(sgls);
+
+ return rc;
+}
+
+static int
+efct_hw_init_io(struct efct_hw *hw)
+{
+ u32 i, idx = 0;
+ bool prereg = false;
+ struct efct_hw_io *io = NULL;
+ int rc = 0;
+
+ prereg = hw->sli.params.sgl_pre_registered;
+
+ if (prereg)
+ return efct_hw_init_prereg_io(hw);
+
+ for (i = 0; i < hw->config.n_io; i++, idx++) {
+ io = hw->io[idx];
+ io->state = EFCT_HW_IO_STATE_FREE;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
+
+ memset(&param, 0, sizeof(param));
+ param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
+ fdt_xfer_hint, rc);
+ else
+ efc_log_info(hw->os, "Set FTD transfer hint to %d\n",
+ le32_to_cpu(param.fdt_xfer_hint));
+
+ return rc;
+}
+
+static int
+efct_hw_config_rq(struct efct_hw *hw)
+{
+ u32 min_rq_count, i, rc;
+ struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ u8 buf[SLI4_BMBX_SIZE];
+
+ efc_log_info(hw->os, "using REG_FCFI standard\n");
+
+ /*
+ * Set the filter match/mask values from hw's
+ * filter_def values
+ */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_cfg[i].rq_id = cpu_to_le16(0xffff);
+ rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
+ rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ /*
+ * Update the rq_id's of the FCF configuration
+ * (don't update more than the number of rq_cfg
+ * elements)
+ */
+ min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
+ hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
+ for (i = 0; i < min_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+ u32 j;
+
+ for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
+ u32 mask = (rq->filter_mask != 0) ?
+ rq->filter_mask : 1;
+
+ if (!(mask & (1U << j)))
+ continue;
+
+ rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
+ efct_logfcfi(hw, j, i, rq->hdr->id);
+ }
+ }
+
+ rc = -EIO;
+ if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "FCFI registration failed\n");
+ return rc;
+ }
+ hw->fcf_indicator =
+ le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
+
+ return rc;
+}
+
+static int
+efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
+{
+ u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
+ struct hw_rq *rq;
+ struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
+ struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ u32 rc, i;
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ goto issue_cmd;
+
+ /* Set the filter match/mask values from hw's filter_def values */
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ rq_filter[i].rq_id = cpu_to_le16(0xffff);
+ rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
+ rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
+ rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
+ rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
+ }
+
+ rq = hw->hw_rq[0];
+ rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
+ rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
+
+ mrq_bitmask = 0x2;
+issue_cmd:
+ efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
+ hw->hw_rq_count, hw->config.rq_selection_policy, mode);
+ /* Invoke REG_FCFI_MRQ */
+ rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
+ hw->config.rq_selection_policy, mrq_bitmask,
+ hw->hw_mrq_count, rq_filter);
+ if (rc) {
+ efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+
+ rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
+
+ if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
+ efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
+ rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
+ return -EIO;
+ }
+
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
+ hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
+
+ return 0;
+}
+
+static void
+efct_hw_queue_hash_add(struct efct_queue_hash *hash,
+ u16 id, u16 index)
+{
+ u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+	 * Since the hash is always bigger than the number of queues, we
+	 * never have to worry about an infinite loop.
+ */
+ while (hash[hash_index].in_use)
+ hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /* not used, claim the entry */
+ hash[hash_index].id = id;
+ hash[hash_index].in_use = true;
+ hash[hash_index].index = index;
+}
+
+static int
+efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
+{
+ int rc = 0;
+ u8 buf[SLI4_BMBX_SIZE];
+ struct sli4_rqst_cmn_set_features_health_check param;
+ u32 health_check_flag = 0;
+
+ memset(&param, 0, sizeof(param));
+
+ if (enable)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
+
+ if (query)
+ health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
+
+ param.health_check_dword = cpu_to_le32(health_check_flag);
+
+ /* build the set_features command */
+ sli_cmd_common_set_features(&hw->sli, buf,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);
+
+ rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
+ if (rc)
+ efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
+ else
+ efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
+
+ return rc;
+}
+
+int
+efct_hw_init(struct efct_hw *hw)
+{
+ int rc;
+ u32 i = 0;
+ int rem_count;
+ unsigned long flags = 0;
+ struct efct_hw_io *temp;
+ struct efc_dma *dma;
+
+ /*
+ * Make sure the command lists are empty. If this is start-of-day,
+ * they'll be empty since they were just initialized in efct_hw_setup.
+ * If we've just gone through a reset, the command and command pending
+ * lists should have been cleaned up as part of the reset
+ * (efct_hw_reset()).
+ */
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on cmd list\n");
+ return -EIO;
+ }
+ if (!list_empty(&hw->cmd_pending)) {
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ efc_log_err(hw->os, "command found on pending list\n");
+ return -EIO;
+ }
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+	/* Free RQ buffers if previously allocated */
+ efct_hw_rx_free(hw);
+
+ /*
+ * The IO queues must be initialized here for the reset case. The
+ * efct_hw_init_io() function will re-add the IOs to the free list.
+ * The cmd_head list should be OK since we free all entries in
+ * efct_hw_command_cancel() that is called in the efct_hw_reset().
+ */
+
+ /* If we are in this function due to a reset, there may be stale items
+ * on lists that need to be removed. Clean them up.
+ */
+ rem_count = 0;
+ while ((!list_empty(&hw->io_wait_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_inuse))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
+ rem_count);
+
+ rem_count = 0;
+ while ((!list_empty(&hw->io_free))) {
+ rem_count++;
+ temp = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del_init(&temp->list_entry);
+ }
+ if (rem_count > 0)
+ efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
+ rem_count);
+
+	/* If MRQ is not required, make sure we don't request the feature. */
+ if (hw->config.n_rq == 1)
+ hw->sli.features &= (~SLI4_REQFEAT_MRQP);
+
+ if (sli_init(&hw->sli)) {
+ efc_log_err(hw->os, "SLI failed to initialize\n");
+ return -EIO;
+ }
+
+ if (hw->sliport_healthcheck) {
+ rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
+ if (rc != 0) {
+ efc_log_err(hw->os, "Enable port Health check fail\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Set FDT transfer hint, only works on Lancer
+ */
+ if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
+ /*
+ * Non-fatal error. In particular, we can disregard failure to
+ * set EFCT_HW_FDT_XFER_HINT on devices with legacy firmware
+ * that do not support EFCT_HW_FDT_XFER_HINT feature.
+ */
+ efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
+ }
+
+ /* zero the hashes */
+ memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
+ efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
+ efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
+
+ memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
+ efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
+ EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
+
+ rc = efct_hw_init_queues(hw);
+ if (rc)
+ return rc;
+
+ rc = efct_hw_map_wq_cpu(hw);
+ if (rc)
+ return rc;
+
+	/* Allocate and post RQ buffers */
+ rc = efct_hw_rx_allocate(hw);
+ if (rc) {
+ efc_log_err(hw->os, "rx_allocate failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_rx_post(hw);
+ if (rc) {
+ efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
+ return rc;
+ }
+
+ if (hw->config.n_eq == 1) {
+ rc = efct_hw_config_rq(hw);
+ if (rc) {
+ efc_log_err(hw->os, "config rq failed %d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
+ return rc;
+ }
+
+ rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
+ if (rc != 0) {
+ efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
+ return rc;
+ }
+ }
+
+ /*
+ * Allocate the WQ request tag pool, if not previously allocated
+ * (the request tag value is 16 bits, thus the pool allocation size
+ * of 64k)
+ */
+ hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
+ if (!hw->wq_reqtag_pool) {
+ efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rc = efct_hw_setup_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO allocation failure\n");
+ return rc;
+ }
+
+ rc = efct_hw_init_io(hw);
+ if (rc) {
+ efc_log_err(hw->os, "IO initialization failure\n");
+ return rc;
+ }
+
+ dma = &hw->loop_map;
+ dma->size = SLI4_MIN_LOOP_MAP_BYTES;
+ dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
+ GFP_DMA);
+ if (!dma->virt)
+ return -EIO;
+
+ /*
+ * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ
+ * entries
+ */
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_arm(&hw->sli, &hw->eq[i], true);
+
+ /*
+ * Initialize RQ hash
+ */
+ for (i = 0; i < hw->rq_count; i++)
+ efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
+
+ /*
+ * Initialize WQ hash
+ */
+ for (i = 0; i < hw->wq_count; i++)
+ efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
+
+ /*
+ * Arming the CQ allows (e.g.) MQ completions to write CQ entries
+ */
+ for (i = 0; i < hw->cq_count; i++) {
+ efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
+ sli_queue_arm(&hw->sli, &hw->cq[i], true);
+ }
+
+ /* Set RQ process limit*/
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
+ }
+
+ /* record the fact that the queues are functional */
+ hw->state = EFCT_HW_STATE_ACTIVE;
+ /*
+	 * Allocate a HW IO for send frame.
+ */
+ hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
+ if (!hw->hw_wq[0]->send_frame_io)
+ efc_log_err(hw->os, "alloc for send_frame_io failed\n");
+
+ /* Initialize send frame sequence id */
+ atomic_set(&hw->send_frame_seq_id, 0);
+
+ return 0;
+}
+
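+/*
+ * Parse a comma-separated filter definition string (for example the default
+ * "0x01ff22ff,0,0,0") into hw->config.filter_def[].
+ */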
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value)
+{
+ int rc = 0;
+ char *p = NULL;
+ char *token;
+ u32 idx = 0;
+
+ for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
+ hw->config.filter_def[idx] = 0;
+
+ p = kstrdup(value, GFP_KERNEL);
+ if (!p || !*p) {
+ efc_log_err(hw->os, "p is NULL\n");
+ return -ENOMEM;
+ }
+
+ idx = 0;
+ while ((token = strsep(&p, ",")) && *token) {
+ if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
+ efc_log_err(hw->os, "kstrtoint failed\n");
+
+ if (!p || !*p)
+ break;
+
+ if (idx == ARRAY_SIZE(hw->config.filter_def))
+ break;
+ }
+ kfree(p);
+
+ return rc;
+}
+
+u64
+efct_get_wwnn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwnn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
+u64
+efct_get_wwpn(struct efct_hw *hw)
+{
+ struct sli4 *sli = &hw->sli;
+ u8 p[8];
+
+ memcpy(p, sli->wwpn, sizeof(p));
+ return get_unaligned_be64(p);
+}
+
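+/* Allocate @count DMA-coherent RQ buffers of @size bytes for RQ @rqindex. */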
+static struct efc_hw_rq_buffer *
+efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
+ u32 size)
+{
+ struct efct *efct = hw->os;
+ struct efc_hw_rq_buffer *rq_buf = NULL;
+ struct efc_hw_rq_buffer *prq;
+ u32 i;
+
+ if (!count)
+ return NULL;
+
+ rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL);
+ if (!rq_buf)
+ return NULL;
+ memset(rq_buf, 0, sizeof(*rq_buf) * count);
+
+	for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ prq->rqindex = rqindex;
+ prq->dma.size = size;
+ prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
+ prq->dma.size,
+ &prq->dma.phys,
+ GFP_DMA);
+ if (!prq->dma.virt) {
+ efc_log_err(hw->os, "DMA allocation failed\n");
+ kfree(rq_buf);
+ return NULL;
+ }
+ }
+ return rq_buf;
+}
+
+static void
+efct_hw_rx_buffer_free(struct efct_hw *hw,
+ struct efc_hw_rq_buffer *rq_buf,
+ u32 count)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ struct efc_hw_rq_buffer *prq;
+
+ if (rq_buf) {
+ for (i = 0, prq = rq_buf; i < count; i++, prq++) {
+ dma_free_coherent(&efct->pci->dev,
+ prq->dma.size, prq->dma.virt,
+ prq->dma.phys);
+ memset(&prq->dma, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(rq_buf);
+ }
+}
+
+int
+efct_hw_rx_allocate(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 i;
+ int rc = 0;
+ u32 rqindex = 0;
+ u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
+ u32 payload_size = hw->config.rq_default_buffer_size;
+
+ rqindex = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ /* Allocate header buffers */
+ rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ hdr_size);
+ if (!rq->hdr_buf) {
+ efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d header %4d by %4d bytes\n",
+ i, rq->hdr->id, rq->entry_count, hdr_size);
+
+ rqindex++;
+
+ /* Allocate payload buffers */
+ rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
+ rq->entry_count,
+ payload_size);
+ if (!rq->payload_buf) {
+ efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
+ rc = -EIO;
+ break;
+ }
+ efc_log_debug(hw->os,
+ "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
+ i, rq->data->id, rq->entry_count, payload_size);
+ rqindex++;
+ }
+
+ return rc ? -EIO : 0;
+}
+
+int
+efct_hw_rx_post(struct efct_hw *hw)
+{
+ u32 i;
+ u32 idx;
+ u32 rq_idx;
+ int rc = 0;
+
+ if (!hw->seq_pool) {
+ u32 count = 0;
+
+ for (i = 0; i < hw->hw_rq_count; i++)
+ count += hw->hw_rq[i]->entry_count;
+
+ hw->seq_pool = kmalloc_array(count,
+ sizeof(struct efc_hw_sequence), GFP_KERNEL);
+ if (!hw->seq_pool)
+ return -ENOMEM;
+ }
+
+ /*
+ * In RQ pair mode, we MUST post the header and payload buffer at the
+ * same time.
+ */
+ for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
+ struct hw_rq *rq = hw->hw_rq[rq_idx];
+
+ for (i = 0; i < rq->entry_count - 1; i++) {
+ struct efc_hw_sequence *seq;
+
+ seq = hw->seq_pool + idx;
+ idx++;
+ seq->header = &rq->hdr_buf[i];
+ seq->payload = &rq->payload_buf[i];
+ rc = efct_hw_sequence_free(hw, seq);
+ if (rc)
+ break;
+ }
+ if (rc)
+ break;
+ }
+
+	if (rc) {
+		kfree(hw->seq_pool);
+		hw->seq_pool = NULL;
+	}
+
+ return rc;
+}
+
+void
+efct_hw_rx_free(struct efct_hw *hw)
+{
+ u32 i;
+
+ /* Free hw_rq buffers */
+ for (i = 0; i < hw->hw_rq_count; i++) {
+ struct hw_rq *rq = hw->hw_rq[i];
+
+ if (rq) {
+ efct_hw_rx_buffer_free(hw, rq->hdr_buf,
+ rq->entry_count);
+ rq->hdr_buf = NULL;
+ efct_hw_rx_buffer_free(hw, rq->payload_buf,
+ rq->entry_count);
+ rq->payload_buf = NULL;
+ }
+ }
+}
+
+static int
+efct_hw_cmd_submit_pending(struct efct_hw *hw)
+{
+ int rc = 0;
+
+ /* Assumes lock held */
+
+ /* Only submit MQE if there's room */
+ while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
+ !list_empty(&hw->cmd_pending)) {
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_pending,
+ struct efct_command_ctx, list_entry);
+ if (!ctx)
+ break;
+
+ list_del_init(&ctx->list_entry);
+
+ list_add_tail(&ctx->list_entry, &hw->cmd_head);
+ hw->cmd_head_count++;
+ if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
+ efc_log_debug(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ rc = -EIO;
+ break;
+ }
+ }
+ return rc;
+}
+
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
+{
+ int rc = -EIO;
+ unsigned long flags = 0;
+ void *bmbx = NULL;
+
+ /*
+ * If the chip is in an error state (UE'd) then reject this mailbox
+ * command.
+ */
+ if (sli_fw_error_status(&hw->sli) > 0) {
+ efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
+ efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(&hw->sli),
+ sli_reg_read_err1(&hw->sli),
+ sli_reg_read_err2(&hw->sli));
+
+ return -EIO;
+ }
+
+ /*
+ * Send a mailbox command to the hardware, and either wait for
+ * a completion (EFCT_CMD_POLL) or get an optional asynchronous
+ * completion (EFCT_CMD_NOWAIT).
+ */
+
+ if (opts == EFCT_CMD_POLL) {
+ mutex_lock(&hw->bmbx_lock);
+ bmbx = hw->sli.bmbx.virt;
+
+ memset(bmbx, 0, SLI4_BMBX_SIZE);
+ memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
+
+ if (sli_bmbx_command(&hw->sli) == 0) {
+ rc = 0;
+ memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
+ }
+ mutex_unlock(&hw->bmbx_lock);
+ } else if (opts == EFCT_CMD_NOWAIT) {
+ struct efct_command_ctx *ctx = NULL;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "Can't send command, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -ENOSPC;
+
+ memset(ctx, 0, sizeof(struct efct_command_ctx));
+
+ if (cb) {
+ ctx->cb = cb;
+ ctx->arg = arg;
+ }
+
+ memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
+ ctx->ctx = hw;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /* Add to pending list */
+ INIT_LIST_HEAD(&ctx->list_entry);
+ list_add_tail(&ctx->list_entry, &hw->cmd_pending);
+
+ /* Submit as much of the pending list as we can */
+ rc = efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
+ size_t size)
+{
+ struct efct_command_ctx *ctx = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ if (!list_empty(&hw->cmd_head)) {
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+ list_del_init(&ctx->list_entry);
+ }
+ if (!ctx) {
+ efc_log_err(hw->os, "no command context\n");
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ return -EIO;
+ }
+
+ hw->cmd_head_count--;
+
+ /* Post any pending requests */
+ efct_hw_cmd_submit_pending(hw);
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ if (ctx->cb) {
+ memcpy(ctx->buf, mqe, size);
+ ctx->cb(hw, status, ctx->buf, ctx->arg);
+ }
+
+ mempool_free(ctx, hw->cmd_ctx_pool);
+
+ return 0;
+}
+
+static int
+efct_hw_mq_process(struct efct_hw *hw,
+ int status, struct sli4_queue *mq)
+{
+ u8 mqe[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_mq_read(&hw->sli, mq, mqe);
+ if (!rc)
+ rc = efct_hw_command_process(hw, status, mqe, mq->size);
+
+ return rc;
+}
+
+static int
+efct_hw_command_cancel(struct efct_hw *hw)
+{
+ unsigned long flags = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+
+ /*
+ * Manually clean up remaining commands. Note: since this calls
+ * efct_hw_command_process(), we'll also process the cmd_pending
+ * list, so no need to manually clean that out.
+ */
+ while (!list_empty(&hw->cmd_head)) {
+ u8 mqe[SLI4_BMBX_SIZE] = { 0 };
+ struct efct_command_ctx *ctx;
+
+ ctx = list_first_entry(&hw->cmd_head,
+ struct efct_command_ctx, list_entry);
+
+ efc_log_debug(hw->os, "hung command %08x\n",
+ !ctx ? U32_MAX : *((u32 *)ctx->buf));
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+ rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
+ spin_lock_irqsave(&hw->cmd_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&hw->cmd_lock, flags);
+
+ return rc;
+}
+
+static void
+efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw->os->efcport, status, mqe,
+ ctx->arg);
+
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ }
+}
+
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
+{
+ struct efct_mbox_rqst_ctx *ctx;
+ struct efct *efct = base;
+ struct efct_hw *hw = &efct->hw;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd buffer),
+ * we need this to be persistent as the mbox cmd submission may be
+	 * queued and executed later.
+ */
+ ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
+ if (!ctx)
+ return -EIO;
+
+ ctx->callback = cb;
+ ctx->arg = arg;
+
+ rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
+ if (rc) {
+ efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
+ mempool_free(ctx, hw->mbox_rqst_pool);
+ return -EIO;
+ }
+
+ return 0;
+}
+
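+/* Pop an IO from the free list; the caller is expected to hold hw->io_lock. */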
+static inline struct efct_hw_io *
+_efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+
+ if (!list_empty(&hw->io_free)) {
+ io = list_first_entry(&hw->io_free, struct efct_hw_io,
+ list_entry);
+ list_del(&io->list_entry);
+ }
+ if (io) {
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_inuse);
+ io->state = EFCT_HW_IO_STATE_INUSE;
+ io->abort_reqtag = U32_MAX;
+ io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
+ if (!io->wq) {
+ efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
+ raw_smp_processor_id());
+ io->wq = hw->hw_wq[0];
+ }
+ kref_init(&io->ref);
+ io->release = efct_hw_io_free_internal;
+ } else {
+ atomic_add(1, &hw->io_alloc_failed_count);
+ }
+
+ return io;
+}
+
+struct efct_hw_io *
+efct_hw_io_alloc(struct efct_hw *hw)
+{
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ io = _efct_hw_io_alloc(hw);
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+
+ return io;
+}
+
+static void
+efct_hw_io_free_move_correct_list(struct efct_hw *hw,
+ struct efct_hw_io *io)
+{
+ /*
+ * When an IO is freed, depending on the exchange busy flag,
+ * move it to the correct list.
+ */
+ if (io->xbusy) {
+ /*
+ * add to wait_free list and wait for XRI_ABORTED CQEs to clean
+ * up
+ */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_wait_free);
+ io->state = EFCT_HW_IO_STATE_WAIT_FREE;
+ } else {
+ /* IO not busy, add to free list */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &hw->io_free);
+ io->state = EFCT_HW_IO_STATE_FREE;
+ }
+}
+
+static inline void
+efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ /* initialize IO fields */
+ efct_hw_init_free_io(io);
+
+ /* Restore default SGL */
+ efct_hw_io_restore_sgl(hw, io);
+}
+
+void
+efct_hw_io_free_internal(struct kref *arg)
+{
+ unsigned long flags = 0;
+ struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
+ struct efct_hw *hw = io->hw;
+
+ /* perform common cleanup */
+ efct_hw_io_free_common(hw, io);
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ /* remove from in-use list */
+ if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+int
+efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
+{
+ return kref_put(&io->ref, io->release);
+}
+
+struct efct_hw_io *
+efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
+{
+ u32 ioindex;
+
+ ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
+ return hw->io[ioindex];
+}
+
+int
+efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
+ enum efct_hw_io_type type)
+{
+ struct sli4_sge *data = NULL;
+ u32 i = 0;
+ u32 skips = 0;
+ u32 sge_flags = 0;
+
+ if (!io) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ /* Clear / reset the scatter-gather list */
+ io->sgl = &io->def_sgl;
+ io->sgl_count = io->def_sgl_count;
+ io->first_data_sge = 0;
+
+ memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+ io->n_sge = 0;
+ io->sge_offset = 0;
+
+ io->type = type;
+
+ data = io->sgl->virt;
+
+ /*
+ * Some IO types have underlying hardware requirements on the order
+ * of SGEs. Process all special entries here.
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE:
+
+ /* populate host resident XFER_RDY buffer */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ data->buffer_address_high =
+ cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
+ data->buffer_address_low =
+ cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
+ data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+
+ skips = EFCT_TARGET_WRITE_SKIPS;
+
+ io->n_sge = 1;
+ break;
+ case EFCT_HW_IO_TARGET_READ:
+ /*
+ * For FCP_TSEND64, the first 2 entries are SKIP SGE's
+ */
+ skips = EFCT_TARGET_READ_SKIPS;
+ break;
+ case EFCT_HW_IO_TARGET_RSP:
+ /*
+ * No skips, etc. for FCP_TRSP64
+ */
+ break;
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ return -EIO;
+ }
+
+ /*
+ * Write skip entries
+ */
+ for (i = 0; i < skips; i++) {
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ data->dw2_flags = cpu_to_le32(sge_flags);
+ data++;
+ }
+
+ io->n_sge += skips;
+
+ /*
+ * Set last
+ */
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ return 0;
+}
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length)
+{
+ struct sli4_sge *data = NULL;
+ u32 sge_flags = 0;
+
+ if (!io || !addr || !length) {
+ efc_log_err(hw->os,
+ "bad parameter hw=%p io=%p addr=%lx length=%u\n",
+ hw, io, addr, length);
+ return -EIO;
+ }
+
+ if (length > hw->sli.sge_supported_length) {
+ efc_log_err(hw->os,
+ "length of SGE %d bigger than allowed %d\n",
+ length, hw->sli.sge_supported_length);
+ return -EIO;
+ }
+
+ data = io->sgl->virt;
+ data += io->n_sge;
+
+ sge_flags = le32_to_cpu(data->dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
+ sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
+ sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
+
+ data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
+ data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
+ data->buffer_length = cpu_to_le32(length);
+
+ /*
+ * Always assume this is the last entry and mark as such.
+ * If this is not the first entry unset the "last SGE"
+ * indication for the previous entry
+ */
+ sge_flags |= SLI4_SGE_LAST;
+ data->dw2_flags = cpu_to_le32(sge_flags);
+
+ if (io->n_sge) {
+ sge_flags = le32_to_cpu(data[-1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_LAST;
+ data[-1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ /* Set first_data_bde if not previously set */
+ if (io->first_data_sge == 0)
+ io->first_data_sge = io->n_sge;
+
+ io->sge_offset += length;
+ io->n_sge++;
+
+ return 0;
+}
+
+void
+efct_hw_io_abort_all(struct efct_hw *hw)
+{
+ struct efct_hw_io *io_to_abort = NULL;
+ struct efct_hw_io *next_io = NULL;
+
+ list_for_each_entry_safe(io_to_abort, next_io,
+ &hw->io_inuse, list_entry) {
+ efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
+ }
+}
+
+static void
+efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_io *io = arg;
+ struct efct_hw *hw = io->hw;
+ u32 ext = 0;
+ u32 len = 0;
+ struct hw_wq_callback *wqcb;
+
+ /*
+ * For IOs that were aborted internally, we may need to issue the
+ * callback here, depending on whether an XRI_ABORTED CQE is expected
+ * or not. If the status is Local Reject/No XRI, then
+ * issue the callback now.
+ */
+ ext = sli_fc_ext_status(&hw->sli, cqe);
+ if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
+ ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
+ efct_hw_done_t done = io->done;
+
+ io->done = NULL;
+
+ /*
+ * Use the latched status, as this is always saved for an
+ * internal abort. Note: we won't have both a done and an
+ * abort_done function, so don't worry about clobbering the
+ * len, status, and ext fields.
+ */
+ status = io->saved_status;
+ len = io->saved_len;
+ ext = io->saved_ext;
+ io->status_saved = false;
+ done(io, len, status, ext, io->arg);
+ }
+
+ if (io->abort_done) {
+ efct_hw_done_t done = io->abort_done;
+
+ io->abort_done = NULL;
+ done(io, len, status, ext, io->abort_arg);
+ }
+
+ /* clear abort bit to indicate abort is complete */
+ io->abort_in_progress = false;
+
+ /* Free the WQ callback */
+ if (io->abort_reqtag == U32_MAX) {
+ efc_log_err(hw->os, "HW IO already freed\n");
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
+ efct_hw_reqtag_free(hw, wqcb);
+
+ /*
+ * Call efct_hw_io_free() because this releases the WQ reservation as
+ * well as doing the refcount put. Don't duplicate the code here.
+ */
+ (void)efct_hw_io_free(hw, io);
+}
+
+static void
+efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
+{
+ struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
+
+ memset(abort, 0, hw->sli.wqe_size);
+
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
+
+ abort->t_tag = cpu_to_le32(wqe->id);
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
+
+ abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
+
+ abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+}
+
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg)
+{
+ struct hw_wq_callback *wqcb;
+ unsigned long flags = 0;
+
+ if (!io_to_abort) {
+ efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
+ hw, io_to_abort);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
+ hw->state);
+ return -EIO;
+ }
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
+ /* command no longer active */
+ efc_log_debug(hw->os,
+ "io not active xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -ENOENT;
+ }
+
+ /* Must have a valid WQ reference */
+ if (!io_to_abort->wq) {
+ efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
+ io_to_abort->indicator);
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -ENOENT;
+ }
+
+ /*
+ * Validation checks complete; now check to see if the IO is already
+ * being aborted and, if not, set the flag.
+ */
+ if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ efc_log_debug(hw->os,
+ "io already being aborted xri=0x%x tag=0x%x\n",
+ io_to_abort->indicator, io_to_abort->reqtag);
+ return -EINPROGRESS;
+ }
+
+ /*
+ * If we got here, the possibilities are:
+ * - host owned xri
+ * - io_to_abort->wq_index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - port owned xri:
+ * - rxri: io_to_abort->wq_index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ * - non-rxri
+ * - io_to_abort->index != U32_MAX
+ * - submit ABORT_WQE to same WQ
+ * - io_to_abort->index == U32_MAX
+ * - submit ABORT_WQE to any WQ
+ */
+ io_to_abort->abort_done = cb;
+ io_to_abort->abort_arg = arg;
+
+ /* Allocate a request tag for the abort portion of this IO */
+ wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
+ if (!wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+
+ io_to_abort->abort_reqtag = wqcb->instance_index;
+ io_to_abort->wqe.send_abts = send_abts;
+ io_to_abort->wqe.id = io_to_abort->indicator;
+ io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
+
+ /*
+ * If the wqe is on the pending list, then set this wqe to be
+ * aborted when the IO's wqe is removed from the list.
+ */
+ if (io_to_abort->wq) {
+ spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
+ if (io_to_abort->wqe.list_entry.next) {
+ io_to_abort->wqe.abort_wqe_submit_needed = true;
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
+ flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
+ }
+
+ efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
+
+ /* ABORT_WQE does not actually utilize an XRI on the Port;
+ * therefore, keep xbusy as-is to track the exchange's state,
+ * not the ABORT_WQE's state
+ */
+ if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
+ io_to_abort->abort_in_progress = false;
+ /* efct_ref_get(): same function */
+ kref_put(&io_to_abort->ref, io_to_abort->release);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+efct_hw_reqtag_pool_free(struct efct_hw *hw)
+{
+ u32 i;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ struct hw_wq_callback *wqcb = NULL;
+
+ if (reqtag_pool) {
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = reqtag_pool->tags[i];
+ if (!wqcb)
+ continue;
+
+ kfree(wqcb);
+ }
+ kfree(reqtag_pool);
+ hw->wq_reqtag_pool = NULL;
+ }
+}
+
+struct reqtag_pool *
+efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
+{
+ u32 i = 0;
+ struct reqtag_pool *reqtag_pool;
+ struct hw_wq_callback *wqcb;
+
+ reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
+ if (!reqtag_pool)
+ return NULL;
+
+ INIT_LIST_HEAD(&reqtag_pool->freelist);
+ /* initialize reqtag pool lock */
+ spin_lock_init(&reqtag_pool->lock);
+ for (i = 0; i < U16_MAX; i++) {
+ wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
+ if (!wqcb)
+ break;
+
+ reqtag_pool->tags[i] = wqcb;
+ wqcb->instance_index = i;
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
+ }
+
+ return reqtag_pool;
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ struct hw_wq_callback *wqcb = NULL;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+ unsigned long flags = 0;
+
+ if (!callback)
+ return wqcb;
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+
+ if (!list_empty(&reqtag_pool->freelist)) {
+ wqcb = list_first_entry(&reqtag_pool->freelist,
+ struct hw_wq_callback, list_entry);
+ }
+
+ if (wqcb) {
+ list_del_init(&wqcb->list_entry);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ wqcb->callback = callback;
+ wqcb->arg = arg;
+ } else {
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+ }
+
+ return wqcb;
+}
+
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
+{
+ unsigned long flags = 0;
+ struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
+
+ if (!wqcb->callback)
+ efc_log_err(hw->os, "WQCB is already freed\n");
+
+ spin_lock_irqsave(&reqtag_pool->lock, flags);
+ wqcb->callback = NULL;
+ wqcb->arg = NULL;
+ INIT_LIST_HEAD(&wqcb->list_entry);
+ list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
+ spin_unlock_irqrestore(&reqtag_pool->lock, flags);
+}
+
+struct hw_wq_callback *
+efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
+{
+ struct hw_wq_callback *wqcb;
+
+ wqcb = hw->wq_reqtag_pool->tags[instance_index];
+ if (!wqcb)
+ efc_log_err(hw->os, "wqcb for instance %d is null\n",
+ instance_index);
+
+ return wqcb;
+}
+
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
+{
+ int index = -1;
+ int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
+
+ /*
+ * Since the hash is always bigger than the maximum number of Qs, we
+ * never have to worry about an infinite loop; we will always find
+ * an unused entry.
+ */
+ do {
+ if (hash[i].in_use && hash[i].id == id)
+ index = hash[i].index;
+ else
+ i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
+ } while (index == -1 && hash[i].in_use);
+
+ return index;
+}
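+
+/*
+ * Example: with EFCT_HW_Q_HASH_SIZE of 128, queue id 0x203 first probes
+ * slot 0x03. If that slot is in use for a different id, the lookup advances
+ * linearly to slot 0x04, 0x05, and so on, until either the id matches (the
+ * stored index is returned) or an unused slot is reached (-1 is returned).
+ * Because the hash is larger than the number of queues, an unused slot
+ * always exists and the probe terminates.
+ */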
+
+int
+efct_hw_process(struct efct_hw *hw, u32 vector,
+ u32 max_isr_time_msec)
+{
+ struct hw_eq *eq;
+
+ /*
+ * The caller should disable interrupts if they wish to prevent us
+ * from processing during a shutdown. The following states are defined:
+ * EFCT_HW_STATE_UNINITIALIZED - No queues allocated
+ * EFCT_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
+ * queues are cleared.
+ * EFCT_HW_STATE_ACTIVE - Chip and queues are operational
+ * EFCT_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
+ * EFCT_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
+ * completions.
+ */
+ if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
+ return 0;
+
+ /* Get pointer to struct hw_eq */
+ eq = hw->hw_eq[vector];
+ if (!eq)
+ return 0;
+
+ eq->use_count++;
+
+ return efct_hw_eq_process(hw, eq, max_isr_time_msec);
+}
+
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec)
+{
+ u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
+ u32 tcheck_count;
+ u64 tstart;
+ u64 telapsed;
+ bool done = false;
+
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
+ u16 cq_id = 0;
+ int rc;
+
+ rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
+ if (unlikely(rc)) {
+ if (rc == SLI4_EQE_STATUS_EQ_FULL) {
+ u32 i;
+
+ /*
+ * Received a sentinel EQE indicating the
+ * EQ is full. Process all CQs
+ */
+ for (i = 0; i < hw->cq_count; i++)
+ efct_hw_cq_process(hw, hw->hw_cq[i]);
+ continue;
+ } else {
+ return rc;
+ }
+ } else {
+ int index;
+
+ index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
+
+ if (likely(index >= 0))
+ efct_hw_cq_process(hw, hw->hw_cq[index]);
+ else
+ efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
+ }
+
+ if (eq->queue->n_posted > eq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, eq->queue, false);
+
+ if (tcheck_count && (--tcheck_count == 0)) {
+ tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed >= max_isr_time_msec)
+ done = true;
+ }
+ }
+ sli_queue_eq_arm(&hw->sli, eq->queue, true);
+
+ return 0;
+}
+
+static int
+_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int queue_rc;
+
+ /* Every so often, set the wqec bit to generate consumed completions */
+ if (wq->wqec_count)
+ wq->wqec_count--;
+
+ if (wq->wqec_count == 0) {
+ struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
+
+ genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
+ wq->wqec_count = wq->wqec_set_count;
+ }
+
+ /* Decrement WQ free count */
+ wq->free_count--;
+
+ queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
+
+ return (queue_rc < 0) ? -EIO : 0;
+}
+
+static void
+hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
+{
+ struct efct_hw_wqe *wqe;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+
+ /* Update free count with value passed in */
+ wq->free_count += update_free_count;
+
+ while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
+ wqe = list_first_entry(&wq->pending_list,
+ struct efct_hw_wqe, list_entry);
+ list_del_init(&wqe->list_entry);
+ _efct_hw_wq_write(wq, wqe);
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+}
+
+void
+efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
+{
+ u8 cqe[sizeof(struct sli4_mcqe)];
+ u16 rid = U16_MAX;
+ /* completion type */
+ enum sli4_qentry ctype;
+ u32 n_processed = 0;
+ u32 tstart, telapsed;
+
+ tstart = jiffies_to_msecs(jiffies);
+
+ while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
+ int status;
+
+ status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
+ /*
+ * The sign of status is significant. If status is:
+ * == 0 : call completed correctly and
+ * the CQE indicated success
+ * > 0 : call completed correctly and
+ * the CQE indicated an error
+ * < 0 : call failed and no information is available about the
+ * CQE
+ */
+ if (status < 0) {
+ if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
+ /*
+ * Notification that an entry was consumed,
+ * but not completed
+ */
+ continue;
+
+ break;
+ }
+
+ switch (ctype) {
+ case SLI4_QENTRY_ASYNC:
+ sli_cqe_async(&hw->sli, cqe);
+ break;
+ case SLI4_QENTRY_MQ:
+ /*
+ * Process MQ entry. Note there is no way to determine
+ * the MQ_ID from the completion entry.
+ */
+ efct_hw_mq_process(hw, status, hw->mq);
+ break;
+ case SLI4_QENTRY_WQ:
+ efct_hw_wq_process(hw, cq, cqe, status, rid);
+ break;
+ case SLI4_QENTRY_WQ_RELEASE: {
+ u32 wq_id = rid;
+ int index;
+ struct hw_wq *wq = NULL;
+
+ index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
+
+ if (likely(index >= 0)) {
+ wq = hw->hw_wq[index];
+ } else {
+ efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
+ break;
+ }
+ /* Submit any HW IOs that are on the WQ pending list */
+ hw_wq_submit_pending(wq, wq->wqec_set_count);
+
+ break;
+ }
+
+ case SLI4_QENTRY_RQ:
+ efct_hw_rqpair_process_rq(hw, cq, cqe);
+ break;
+ case SLI4_QENTRY_XABT: {
+ efct_hw_xabt_process(hw, cq, cqe, rid);
+ break;
+ }
+ default:
+ efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
+ ctype, rid);
+ break;
+ }
+
+ n_processed++;
+ if (n_processed == cq->queue->proc_limit)
+ break;
+
+ if (cq->queue->n_posted >= cq->queue->posted_limit)
+ sli_queue_arm(&hw->sli, cq->queue, false);
+ }
+
+ sli_queue_arm(&hw->sli, cq->queue, true);
+
+ if (n_processed > cq->queue->max_num_processed)
+ cq->queue->max_num_processed = n_processed;
+ telapsed = jiffies_to_msecs(jiffies) - tstart;
+ if (telapsed > cq->queue->max_process_time)
+ cq->queue->max_process_time = telapsed;
+}
+
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid)
+{
+ struct hw_wq_callback *wqcb;
+
+ if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
+ if (status)
+ efc_log_err(hw->os, "reque xri failed, status = %d\n",
+ status);
+ return;
+ }
+
+ wqcb = efct_hw_reqtag_get_instance(hw, rid);
+ if (!wqcb) {
+ efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
+ return;
+ }
+
+ if (!wqcb->callback) {
+ efc_log_err(hw->os, "wqcb callback is NULL\n");
+ return;
+ }
+
+ (*wqcb->callback)(wqcb->arg, cqe, status);
+}
+
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid)
+{
+ /* search IOs wait free list */
+ struct efct_hw_io *io = NULL;
+ unsigned long flags = 0;
+
+ io = efct_hw_io_lookup(hw, rid);
+ if (!io) {
+ /* IO lookup failure should never happen */
+ efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
+ return;
+ }
+
+ if (!io->xbusy)
+ efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
+ else
+ /* mark IO as no longer busy */
+ io->xbusy = false;
+
+ /*
+ * For IOs that were aborted internally, we need to issue any pending
+ * callback here.
+ */
+ if (io->done) {
+ efct_hw_done_t done = io->done;
+ void *arg = io->arg;
+
+ /*
+ * Use latched status as this is always saved for an internal
+ * abort
+ */
+ int status = io->saved_status;
+ u32 len = io->saved_len;
+ u32 ext = io->saved_ext;
+
+ io->done = NULL;
+ io->status_saved = false;
+
+ done(io, len, status, ext, arg);
+ }
+
+ spin_lock_irqsave(&hw->io_lock, flags);
+ if (io->state == EFCT_HW_IO_STATE_INUSE ||
+ io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ /* if on wait_free list, caller has already freed IO;
+ * remove from wait_free list and add to free list.
+ * if on in-use list, already marked as no longer busy;
+ * just leave there and wait for caller to free.
+ */
+ if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
+ io->state = EFCT_HW_IO_STATE_FREE;
+ list_del_init(&io->list_entry);
+ efct_hw_io_free_move_correct_list(hw, io);
+ }
+ }
+ spin_unlock_irqrestore(&hw->io_lock, flags);
+}
+
+static int
+efct_hw_flush(struct efct_hw *hw)
+{
+ u32 i = 0;
+
+ /* Process any remaining completions */
+ for (i = 0; i < hw->eq_count; i++)
+ efct_hw_process(hw, i, ~0);
+
+ return 0;
+}
+
+int
+efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
+{
+ int rc = 0;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&wq->queue->lock, flags);
+ if (list_empty(&wq->pending_list)) {
+ if (wq->free_count > 0) {
+ rc = _efct_hw_wq_write(wq, wqe);
+ } else {
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ while (wq->free_count > 0) {
+ wqe = list_first_entry_or_null(&wq->pending_list,
+ struct efct_hw_wqe, list_entry);
+ if (!wqe)
+ break;
+
+ list_del_init(&wqe->list_entry);
+ rc = _efct_hw_wq_write(wq, wqe);
+ if (rc)
+ break;
+
+ if (wqe->abort_wqe_submit_needed) {
+ wqe->abort_wqe_submit_needed = false;
+ efct_hw_fill_abort_wqe(wq->hw, wqe);
+
+ INIT_LIST_HEAD(&wqe->list_entry);
+ list_add_tail(&wqe->list_entry, &wq->pending_list);
+ wq->wq_pending_count++;
+ }
+ }
+
+ spin_unlock_irqrestore(&wq->queue->lock, flags);
+
+ return rc;
+}
+
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_bls_send(efct, type, bls, NULL, NULL);
+}
+
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg)
+{
+ struct efct_hw *hw = &efct->hw;
+ struct efct_hw_io *hio;
+ struct sli_bls_payload bls;
+ int rc;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os,
+ "cannot send BLS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ efc_log_err(hw->os, "HIO allocation failed\n");
+ return -EIO;
+ }
+
+ hio->done = cb;
+ hio->arg = arg;
+
+ bls_params->xri = hio->indicator;
+ bls_params->tag = hio->reqtag;
+
+ if (type == FC_RCTL_BA_ACC) {
+ hio->type = EFCT_HW_BLS_ACC;
+ bls.type = SLI4_SLI_BLS_ACC;
+ memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
+ } else {
+ hio->type = EFCT_HW_BLS_RJT;
+ bls.type = SLI4_SLI_BLS_RJT;
+ memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
+ }
+
+ bls.ox_id = cpu_to_le16(bls_params->ox_id);
+ bls.rx_id = cpu_to_le16(bls_params->rx_id);
+
+ if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
+ &bls, bls_params)) {
+ efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
+ return -EIO;
+ }
+
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+
+ return rc;
+}
+
+static int
+efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *arg)
+{
+ struct efc_disc_io *io = arg;
+
+ efc_disc_io_complete(io, length, status, ext_status);
+ return 0;
+}
+
+static inline void
+efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
+{
+ u8 *cmd = io->req.virt;
+
+ params->cmd = *cmd;
+ params->s_id = io->s_id;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.els.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.els.timeout;
+}
+
+static inline void
+efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
+{
+ params->r_ctl = io->iparam.ct.r_ctl;
+ params->type = io->iparam.ct.type;
+ params->df_ctl = io->iparam.ct.df_ctl;
+ params->d_id = io->d_id;
+ params->ox_id = io->iparam.ct.ox_id;
+ params->rpi = io->rpi;
+ params->vpi = io->vpi;
+ params->rpi_registered = io->rpi_registered;
+ params->xmit_len = io->xmit_len;
+ params->rsp_len = io->rsp_len;
+ params->timeout = io->iparam.ct.timeout;
+}
+
+/**
+ * efct_els_hw_srrs_send() - Send a single request and response cmd.
+ * @efc: efc library structure
+ * @io: Discovery IO used to hold els and ct cmd context.
+ *
+ * This routine supports communication sequences consisting of a single
+ * request and single response between two endpoints. Examples include:
+ * - Sending an ELS request.
+ * - Sending an ELS response - To send an ELS response, the caller must provide
+ * the OX_ID from the received request.
+ * - Sending an FC Common Transport (FC-CT) request - To send an FC-CT request,
+ * the caller must provide the R_CTL, TYPE, and DF_CTL
+ * values to place in the FC frame header.
+ *
+ * Return: Status of the request.
+ */
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
+{
+ struct efct *efct = efc->base;
+ struct efct_hw_io *hio;
+ struct efct_hw *hw = &efct->hw;
+ struct efc_dma *send = &io->req;
+ struct efc_dma *receive = &io->rsp;
+ struct sli4_sge *sge = NULL;
+ int rc = 0;
+ u32 len = io->xmit_len;
+ u32 sge0_flags;
+ u32 sge1_flags;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_debug(hw->os,
+ "cannot send SRRS, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ hio = efct_hw_io_alloc(hw);
+ if (!hio) {
+ pr_err("HIO alloc failed\n");
+ return -EIO;
+ }
+
+ hio->done = efct_els_ssrs_send_cb;
+ hio->arg = io;
+
+ sge = hio->sgl->virt;
+
+ /* clear both SGE */
+ memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
+
+ sge0_flags = le32_to_cpu(sge[0].dw2_flags);
+ sge1_flags = le32_to_cpu(sge[1].dw2_flags);
+ if (send->size) {
+ sge[0].buffer_address_high =
+ cpu_to_le32(upper_32_bits(send->phys));
+ sge[0].buffer_address_low =
+ cpu_to_le32(lower_32_bits(send->phys));
+
+ sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+
+ sge[0].buffer_length = cpu_to_le32(len);
+ }
+
+ if (io->io_type == EFC_DISC_IO_ELS_REQ ||
+ io->io_type == EFC_DISC_IO_CT_REQ) {
+ sge[1].buffer_address_high =
+ cpu_to_le32(upper_32_bits(receive->phys));
+ sge[1].buffer_address_low =
+ cpu_to_le32(lower_32_bits(receive->phys));
+
+ sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
+ sge1_flags |= SLI4_SGE_LAST;
+
+ sge[1].buffer_length = cpu_to_le32(receive->size);
+ } else {
+ sge0_flags |= SLI4_SGE_LAST;
+ }
+
+ sge[0].dw2_flags = cpu_to_le32(sge0_flags);
+ sge[1].dw2_flags = cpu_to_le32(sge1_flags);
+
+ switch (io->io_type) {
+ case EFC_DISC_IO_ELS_REQ: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_REQ;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+
+ if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &els_params)) {
+ efc_log_err(hw->os, "REQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_ELS_RESP: {
+ struct sli_els_params els_params;
+
+ hio->type = EFCT_HW_ELS_RSP;
+ efct_fill_els_params(io, &els_params);
+ els_params.xri = hio->indicator;
+ els_params.tag = hio->reqtag;
+ if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
+ &els_params)){
+ efc_log_err(hw->os, "RSP WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_REQ: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)){
+ efc_log_err(hw->os, "GEN WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFC_DISC_IO_CT_RESP: {
+ struct sli_ct_params ct_params;
+
+ hio->type = EFCT_HW_FC_CT_RSP;
+ efct_fill_ct_params(io, &ct_params);
+ ct_params.xri = hio->indicator;
+ ct_params.tag = hio->reqtag;
+ if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
+ &ct_params)){
+ efc_log_err(hw->os, "XMIT SEQ WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
+ rc = -EIO;
+ }
+
+ if (rc == 0) {
+ hio->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hio->wq->use_count++;
+ rc = efct_hw_wq_write(hio->wq, &hio->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ hio->xbusy = false;
+ }
+ }
+
+ return rc;
+}
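+
+/*
+ * Caller sketch (illustrative, based only on the fields consumed above): to
+ * issue an ELS request, the discovery layer sets io->io_type to
+ * EFC_DISC_IO_ELS_REQ, places the request payload in io->req and a response
+ * buffer in io->rsp, fills io->s_id, io->d_id, io->rpi, io->vpi,
+ * io->xmit_len, io->rsp_len and io->iparam.els.timeout, and then calls
+ * efct_els_hw_srrs_send(). The completion is reported back through
+ * efc_disc_io_complete() via efct_els_ssrs_send_cb().
+ */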
+
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg)
+{
+ int rc = 0;
+ bool send_wqe = true;
+
+ if (!io) {
+ pr_err("bad parm hw=%p io=%p\n", hw, io);
+ return -EIO;
+ }
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE) {
+ efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
+ return -EIO;
+ }
+
+ /*
+ * Save state needed during later stages
+ */
+ io->type = type;
+ io->done = cb;
+ io->arg = arg;
+
+ /*
+ * Format the work queue entry used to send the IO
+ */
+ switch (type) {
+ case EFCT_HW_IO_TARGET_WRITE: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+ struct fcp_txrdy *xfer = io->xfer_rdy.virt;
+
+ /*
+ * Fill in the XFER_RDY for IF_TYPE 0 devices
+ */
+ xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
+ xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRECEIVE WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_READ: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, io->first_data_sge,
+ SLI4_CQ_DEFAULT,
+ 0, 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TSEND WQE error\n");
+ rc = -EIO;
+ }
+ break;
+ }
+ case EFCT_HW_IO_TARGET_RSP: {
+ u16 *flags = &iparam->fcp_tgt.flags;
+
+ if (io->xbusy)
+ *flags |= SLI4_IO_CONTINUATION;
+ else
+ *flags &= ~SLI4_IO_CONTINUATION;
+
+ iparam->fcp_tgt.xri = io->indicator;
+ iparam->fcp_tgt.tag = io->reqtag;
+
+ if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
+ &io->def_sgl, SLI4_CQ_DEFAULT,
+ 0, &iparam->fcp_tgt)) {
+ efc_log_err(hw->os, "TRSP WQE error\n");
+ rc = -EIO;
+ }
+
+ break;
+ }
+ default:
+ efc_log_err(hw->os, "unsupported IO type %#x\n", type);
+ rc = -EIO;
+ }
+
+ if (send_wqe && rc == 0) {
+ io->xbusy = true;
+
+ /*
+ * Add IO to active io wqe list before submitting, in case the
+ * wcqe processing preempts this thread.
+ */
+ hw->tcmd_wq_submit[io->wq->instance]++;
+ io->wq->use_count++;
+ rc = efct_hw_wq_write(io->wq, &io->wqe);
+ if (rc >= 0) {
+ /* non-negative return is success */
+ rc = 0;
+ } else {
+ /* failed to write wqe, remove from active wqe list */
+ efc_log_err(hw->os,
+ "sli_queue_write failed: %d\n", rc);
+ io->xbusy = false;
+ }
+ }
+
+ return rc;
+}
+
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg)
+{
+ int rc;
+ struct efct_hw_wqe *wqe;
+ u32 xri;
+ struct hw_wq *wq;
+
+ wqe = &ctx->wqe;
+
+ /* populate the callback object */
+ ctx->hw = hw;
+
+ /* Fetch and populate request tag */
+ ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
+ if (!ctx->wqcb) {
+ efc_log_err(hw->os, "can't allocate request tag\n");
+ return -ENOSPC;
+ }
+
+ wq = hw->hw_wq[0];
+
+ /* Set XRI and RX_ID in the header based on which WQ, and which
+ * send_frame_io we are using
+ */
+ xri = wq->send_frame_io->indicator;
+
+ /* Build the send frame WQE */
+ rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
+ sof, eof, (u32 *)hdr, payload, payload->len,
+ EFCT_HW_SEND_FRAME_TIMEOUT, xri,
+ ctx->wqcb->instance_index);
+ if (rc) {
+ efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Write to WQ */
+ rc = efct_hw_wq_write(wq, wqe);
+ if (rc) {
+ efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
+ return -EIO;
+ }
+
+ wq->use_count++;
+
+ return 0;
+}
+
+static int
+efct_hw_cb_link_stat(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_link_stats *mbox_rsp;
+ struct efct_hw_link_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
+ u32 num_counters, i;
+ u32 mbox_rsp_flags = 0;
+
+ mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
+ mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
+ num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
+ memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
+ EFCT_HW_LINK_STAT_MAX);
+
+ /* Fill overflow counts, mask starts from SLI4_READ_LNKSTAT_W02OF*/
+ for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
+ counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2)));
+
+ counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->linkfail_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssync_errcnt);
+ counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->losssignal_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_errcnt);
+ counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
+ le32_to_cpu(mbox_rsp->inval_txword_errcnt);
+ counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
+ le32_to_cpu(mbox_rsp->crc_errcnt);
+ counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
+ le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
+ counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
+ le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofa_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_eofni_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_soff_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
+ counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
+ u8 clear_overflow_flags, u8 clear_all_counters,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_link_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command */
+ if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
+ clear_overflow_flags, clear_all_counters))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_link_stat, cb_arg);
+
+ if (rc)
+ kfree(cb_arg);
+
+ return rc;
+}
+
+static int
+efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_read_status *mbox_rsp =
+ (struct sli4_cmd_read_status *)mqe;
+ struct efct_hw_host_stat_cb_arg *cb_arg = arg;
+ struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
+ u32 num_counters = EFCT_HW_HOST_STAT_MAX;
+
+ memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
+ EFCT_HW_HOST_STAT_MAX);
+
+ counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_frame_cnt);
+ counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->trans_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_seq_cnt);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_orig);
+ counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
+ le32_to_cpu(mbox_rsp->tot_exchanges_resp);
+ counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
+ le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
+ counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
+ le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
+ counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
+ le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
+ status = le16_to_cpu(mbox_rsp->hdr.status);
+ cb_arg->cb(status, num_counters, counts, cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
+ void (*cb)(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ struct efct_hw_host_stat_cb_arg *cb_arg;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+
+ cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Send the HW command to get the host stats */
+ if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_host_stat, cb_arg);
+
+ if (rc) {
+ efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
+
+struct efct_hw_async_call_ctx {
+ efct_hw_async_cb_t callback;
+ void *arg;
+ u8 cmd[SLI4_BMBX_SIZE];
+};
+
+static void
+efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx = arg;
+
+ if (ctx) {
+ if (ctx->callback)
+ (*ctx->callback)(hw, status, mqe, ctx->arg);
+
+ kfree(ctx);
+ }
+}
+
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
+{
+ struct efct_hw_async_call_ctx *ctx;
+ int rc;
+
+ /*
+ * Allocate a callback context (which includes the mbox cmd buffer);
+ * we need this to be persistent, as the mbox cmd submission may be
+ * queued and executed later.
+ */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->callback = callback;
+ ctx->arg = arg;
+
+ /* Build and send a NOP mailbox command */
+ if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
+ efc_log_err(hw->os, "COMMON_NOP format failure\n");
+ kfree(ctx);
+ return -EIO;
+ }
+
+ rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
+ ctx);
+ if (rc) {
+ efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
+ kfree(ctx);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
+{
+ struct sli4_cmd_sli_config *mbox_rsp =
+ (struct sli4_cmd_sli_config *)mqe;
+ struct sli4_rsp_cmn_write_object *wr_obj_rsp;
+ struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
+ u32 bytes_written;
+ u16 mbox_status;
+ u32 change_status;
+
+ wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
+ &mbox_rsp->payload.embed;
+ bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
+ mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
+ change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
+ RSP_CHANGE_STATUS);
+
+ if (cb_arg) {
+ if (cb_arg->cb) {
+ if (!status && mbox_status)
+ status = mbox_status;
+ cb_arg->cb(status, bytes_written, change_status,
+ cb_arg->arg);
+ }
+
+ kfree(cb_arg);
+ }
+
+ return 0;
+}
+
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
+ u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 mbxdata[SLI4_BMBX_SIZE];
+ struct efct_hw_fw_wr_cb_arg *cb_arg;
+ int noc = 0;
+
+ cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
+ if (!cb_arg)
+ return -ENOMEM;
+
+ cb_arg->cb = cb;
+ cb_arg->arg = arg;
+
+ /* Write a portion of a firmware image to the device */
+ if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
+ noc, last, size, offset, "/prg/",
+ dma))
+ rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
+ efct_hw_cb_fw_write, cb_arg);
+
+ if (rc != 0) {
+ efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
+ kfree(cb_arg);
+ }
+
+ return rc;
+}
+
+static int
+efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
+ void *arg)
+{
+ return 0;
+}
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg)
+{
+ int rc = -EIO;
+ u8 link[SLI4_BMBX_SIZE];
+ u32 speed = 0;
+ u8 reset_alpa = 0;
+
+ switch (ctrl) {
+ case EFCT_HW_PORT_INIT:
+ if (!sli_cmd_config_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+
+ if (rc != 0) {
+ efc_log_err(hw->os, "CONFIG_LINK failed\n");
+ break;
+ }
+ speed = hw->config.speed;
+ reset_alpa = (u8)(value & 0xff);
+
+ rc = -EIO;
+ if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "INIT_LINK failed\n");
+ break;
+
+ case EFCT_HW_PORT_SHUTDOWN:
+ if (!sli_cmd_down_link(&hw->sli, link))
+ rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
+ efct_hw_cb_port_control, NULL);
+ /* Free buffer on error, since no callback is coming */
+ if (rc)
+ efc_log_err(hw->os, "DOWN_LINK failed\n");
+ break;
+
+ default:
+ efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
+ break;
+ }
+
+ return rc;
+}
+
+void
+efct_hw_teardown(struct efct_hw *hw)
+{
+ u32 i = 0;
+ u32 destroy_queues;
+ u32 free_memory;
+ struct efc_dma *dma;
+ struct efct *efct = hw->os;
+
+ destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
+ free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);
+
+ /* Cancel Sliport Healthcheck */
+ if (hw->sliport_healthcheck) {
+ hw->sliport_healthcheck = 0;
+ efct_hw_config_sli_port_health_check(hw, 0, 0);
+ }
+
+ if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_debug(hw->os,
+ "Some cmds still pending on MQ queue\n");
+
+ /* Cancel any remaining commands */
+ efct_hw_command_cancel(hw);
+ } else {
+ hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
+ }
+
+ dma_free_coherent(&efct->pci->dev,
+ hw->rnode_mem.size, hw->rnode_mem.virt,
+ hw->rnode_mem.phys);
+ memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));
+
+ if (hw->io) {
+ for (i = 0; i < hw->config.n_io; i++) {
+ if (hw->io[i] && hw->io[i]->sgl &&
+ hw->io[i]->sgl->virt) {
+ dma_free_coherent(&efct->pci->dev,
+ hw->io[i]->sgl->size,
+ hw->io[i]->sgl->virt,
+ hw->io[i]->sgl->phys);
+ }
+ kfree(hw->io[i]);
+ hw->io[i] = NULL;
+ }
+ kfree(hw->io);
+ hw->io = NULL;
+ kfree(hw->wqe_buffs);
+ hw->wqe_buffs = NULL;
+ }
+
+ dma = &hw->xfer_rdy;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ dma = &hw->loop_map;
+ dma_free_coherent(&efct->pci->dev,
+ dma->size, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+
+ for (i = 0; i < hw->wq_count; i++)
+ sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->rq_count; i++)
+ sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->mq_count; i++)
+ sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->cq_count; i++)
+ sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
+ free_memory);
+
+ for (i = 0; i < hw->eq_count; i++)
+ sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
+ free_memory);
+
+ /* Free rq buffers */
+ efct_hw_rx_free(hw);
+
+ efct_hw_queue_teardown(hw);
+
+ kfree(hw->wq_cpu_array);
+
+ sli_teardown(&hw->sli);
+
+ /* record the fact that the queues are non-functional */
+ hw->state = EFCT_HW_STATE_UNINITIALIZED;
+
+ /* free sequence free pool */
+ kfree(hw->seq_pool);
+ hw->seq_pool = NULL;
+
+ /* free hw_wq_callback pool */
+ efct_hw_reqtag_pool_free(hw);
+
+ mempool_destroy(hw->cmd_ctx_pool);
+ mempool_destroy(hw->mbox_rqst_pool);
+
+ /* Mark HW setup as not having been called */
+ hw->hw_setup_called = false;
+}
+
+static int
+efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
+ enum efct_hw_state prev_state)
+{
+ int rc = 0;
+
+ switch (reset) {
+ case EFCT_HW_RESET_FUNCTION:
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ case EFCT_HW_RESET_FIRMWARE:
+ efc_log_debug(hw->os, "issuing firmware reset\n");
+ if (sli_fw_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_soft_reset failed\n");
+ rc = -EIO;
+ }
+ /*
+ * Because the FW reset leaves the FW in a non-running state,
+ * follow that with a regular reset.
+ */
+ efc_log_debug(hw->os, "issuing function level reset\n");
+ if (sli_reset(&hw->sli)) {
+ efc_log_err(hw->os, "sli_reset failed\n");
+ rc = -EIO;
+ }
+ break;
+ default:
+ efc_log_err(hw->os, "unknown type - no reset performed\n");
+ hw->state = prev_state;
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
+{
+ int rc = 0;
+ enum efct_hw_state prev_state = hw->state;
+
+ if (hw->state != EFCT_HW_STATE_ACTIVE)
+ efc_log_debug(hw->os,
+ "HW state %d is not active\n", hw->state);
+
+ hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;
+
+ /*
+ * If the prev_state is already reset/teardown in progress,
+ * don't continue further
+ */
+ if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
+ prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
+ return efct_hw_sli_reset(hw, reset, prev_state);
+
+ if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
+ efct_hw_flush(hw);
+
+ if (list_empty(&hw->cmd_head))
+ efc_log_debug(hw->os,
+ "All commands completed on MQ queue\n");
+ else
+ efc_log_err(hw->os,
+ "Some commands still pending on MQ queue\n");
+ }
+
+ /* Reset the chip */
+ rc = efct_hw_sli_reset(hw, reset, prev_state);
+ if (rc == -EINVAL)
+ return -EIO;
+
+ return rc;
+}
diff --git a/drivers/scsi/elx/efct/efct_hw.h b/drivers/scsi/elx/efct/efct_hw.h
new file mode 100644
index 000000000000..f3f4aa78dce9
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw.h
@@ -0,0 +1,764 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef _EFCT_HW_H
+#define _EFCT_HW_H
+
+#include "../libefc_sli/sli4.h"
+
+/*
+ * EFCT PCI IDs
+ */
+#define EFCT_VENDOR_ID 0x10df
+/* LightPulse 16Gb x 4 FC (lancer-g6) */
+#define EFCT_DEVICE_LANCER_G6 0xe307
+/* LightPulse 32Gb x 4 FC (lancer-g7) */
+#define EFCT_DEVICE_LANCER_G7 0xf407
+
+/*Default RQ entries len used by driver*/
+#define EFCT_HW_RQ_ENTRIES_MIN 512
+#define EFCT_HW_RQ_ENTRIES_DEF 1024
+#define EFCT_HW_RQ_ENTRIES_MAX 4096
+
+/*Defines the size of the RQ buffers used for each RQ*/
+#define EFCT_HW_RQ_SIZE_HDR 128
+#define EFCT_HW_RQ_SIZE_PAYLOAD 1024
+
+/*Define the maximum number of multi-receive queues*/
+#define EFCT_HW_MAX_MRQS 8
+
+/*
+ * Define count of when to set the WQEC bit in a submitted
+ * WQE, causing a consumed/released completion to be posted.
+ */
+#define EFCT_HW_WQEC_SET_COUNT 32
+
+/*Send frame timeout in seconds*/
+#define EFCT_HW_SEND_FRAME_TIMEOUT 10
+
+/*
+ * FDT Transfer Hint value; reads greater than this value
+ * will be segmented to implement fairness. A value of zero disables
+ * the feature.
+ */
+#define EFCT_HW_FDT_XFER_HINT 8192
+
+#define EFCT_HW_TIMECHECK_ITERATIONS 100
+#define EFCT_HW_MAX_NUM_MQ 1
+#define EFCT_HW_MAX_NUM_RQ 32
+#define EFCT_HW_MAX_NUM_EQ 16
+#define EFCT_HW_MAX_NUM_WQ 32
+#define EFCT_HW_DEF_NUM_EQ 1
+
+#define OCE_HW_MAX_NUM_MRQ_PAIRS 16
+
+#define EFCT_HW_MQ_DEPTH 128
+#define EFCT_HW_EQ_DEPTH 1024
+
+/*
+ * A CQ will be assigned to each WQ
+ * (CQ must have 2X entries of the WQ for abort
+ * processing), plus a separate one for each RQ PAIR and one for MQ
+ */
+#define EFCT_HW_MAX_NUM_CQ \
+ ((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))
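+/*
+ * With the sizes defined above (EFCT_HW_MAX_NUM_WQ = 32 and
+ * OCE_HW_MAX_NUM_MRQ_PAIRS = 16) this expands to
+ * (32 * 2) + 1 + (16 * 2) = 97 completion queues.
+ */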
+
+#define EFCT_HW_Q_HASH_SIZE 128
+#define EFCT_HW_RQ_HEADER_SIZE 128
+#define EFCT_HW_RQ_HEADER_INDEX 0
+
+#define EFCT_HW_REQUE_XRI_REGTAG 65534
+
+/* Options for efct_hw_command() */
+enum efct_cmd_opts {
+ /* command executes synchronously and busy-waits for completion */
+ EFCT_CMD_POLL,
+ /* command executes asynchronously. Uses callback */
+ EFCT_CMD_NOWAIT,
+};
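+
+/*
+ * The mailbox submissions in this driver path (for example
+ * efct_issue_mbox_rqst() and efct_hw_async_call()) pass EFCT_CMD_NOWAIT
+ * together with a completion callback; EFCT_CMD_POLL instead busy-waits
+ * for the mailbox completion before efct_hw_command() returns.
+ */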
+
+enum efct_hw_reset {
+ EFCT_HW_RESET_FUNCTION,
+ EFCT_HW_RESET_FIRMWARE,
+ EFCT_HW_RESET_MAX
+};
+
+enum efct_hw_topo {
+ EFCT_HW_TOPOLOGY_AUTO,
+ EFCT_HW_TOPOLOGY_NPORT,
+ EFCT_HW_TOPOLOGY_LOOP,
+ EFCT_HW_TOPOLOGY_NONE,
+ EFCT_HW_TOPOLOGY_MAX
+};
+
+/* pack fw revision values into a single uint64_t */
+#define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) \
+ | ((uint64_t)(c) << 16) | ((uint64_t)(d)))
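+/*
+ * For example, HW_FWREV(1, 2, 3, 4) packs to 0x0001000200030004, so two
+ * packed revisions compare numerically in (a, b, c, d) order.
+ */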
+
+#define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d)
+
+enum efct_hw_io_type {
+ EFCT_HW_ELS_REQ,
+ EFCT_HW_ELS_RSP,
+ EFCT_HW_FC_CT,
+ EFCT_HW_FC_CT_RSP,
+ EFCT_HW_BLS_ACC,
+ EFCT_HW_BLS_RJT,
+ EFCT_HW_IO_TARGET_READ,
+ EFCT_HW_IO_TARGET_WRITE,
+ EFCT_HW_IO_TARGET_RSP,
+ EFCT_HW_IO_DNRX_REQUEUE,
+ EFCT_HW_IO_MAX,
+};
+
+enum efct_hw_io_state {
+ EFCT_HW_IO_STATE_FREE,
+ EFCT_HW_IO_STATE_INUSE,
+ EFCT_HW_IO_STATE_WAIT_FREE,
+ EFCT_HW_IO_STATE_WAIT_SEC_HIO,
+};
+
+#define EFCT_TARGET_WRITE_SKIPS 1
+#define EFCT_TARGET_READ_SKIPS 2
+
+struct efct_hw;
+struct efct_io;
+
+#define EFCT_CMD_CTX_POOL_SZ 32
+/**
+ * HW command context.
+ * Stores the state for the asynchronous commands sent to the hardware.
+ */
+struct efct_command_ctx {
+ struct list_head list_entry;
+ int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg);
+ void *arg; /* Argument for callback */
+ /* buffer holding command / results */
+ u8 buf[SLI4_BMBX_SIZE];
+ void *ctx; /* upper layer context */
+};
+
+struct efct_hw_sgl {
+ uintptr_t addr;
+ size_t len;
+};
+
+union efct_hw_io_param_u {
+ struct sli_bls_params bls;
+ struct sli_els_params els;
+ struct sli_ct_params fc_ct;
+ struct sli_fcp_tgt_params fcp_tgt;
+};
+
+/* WQ steering mode */
+enum efct_hw_wq_steering {
+ EFCT_HW_WQ_STEERING_CLASS,
+ EFCT_HW_WQ_STEERING_REQUEST,
+ EFCT_HW_WQ_STEERING_CPU,
+};
+
+/* HW wqe object */
+struct efct_hw_wqe {
+ struct list_head list_entry;
+ bool abort_wqe_submit_needed;
+ bool send_abts;
+ u32 id;
+ u32 abort_reqtag;
+ u8 *wqebuf;
+};
+
+struct efct_hw_io;
+/* Typedef for HW "done" callback */
+typedef int (*efct_hw_done_t)(struct efct_hw_io *, u32 len, int status,
+ u32 ext, void *ul_arg);
+
+/**
+ * HW IO object.
+ *
+ * Stores the per-IO information necessary
+ * for both SLI and efct.
+ * @ref: reference counter for hw io object
+ * @state: state of IO: free, busy, wait_free
+ * @list_entry: used for busy, wait_free, free lists
+ * @wqe: Work queue object, with link for pending
+ * @hw: pointer back to hardware context
+ * @xfer_rdy: transfer ready data
+ * @type: IO type
+ * @xbusy: Exchange is active in FW
+ * @abort_in_progress: if TRUE, abort is in progress
+ * @status_saved: if TRUE, latched status should be returned
+ * @wq_class: WQ class if steering mode is Class
+ * @reqtag: request tag for this HW IO
+ * @wq: WQ assigned to the exchange
+ * @done: Function called on IO completion
+ * @arg: argument passed to IO done callback
+ * @abort_done: Function called on abort completion
+ * @abort_arg: argument passed to abort done callback
+ * @wq_steering: WQ steering mode request
+ * @saved_status: Saved status
+ * @saved_len: Saved length
+ * @saved_ext: Saved extended status
+ * @eq: EQ on which this HIO came up
+ * @sge_offset: SGE data offset
+ * @def_sgl_count: Count of SGEs in default SGL
+ * @abort_reqtag: request tag for an abort of this HW IO
+ * @indicator: Exchange indicator
+ * @def_sgl: default SGL
+ * @sgl: pointer to current active SGL
+ * @sgl_count: count of SGEs in io->sgl
+ * @first_data_sge: index of first data SGE
+ * @n_sge: number of active SGEs
+ */
+struct efct_hw_io {
+ struct kref ref;
+ enum efct_hw_io_state state;
+ void (*release)(struct kref *arg);
+ struct list_head list_entry;
+ struct efct_hw_wqe wqe;
+
+ struct efct_hw *hw;
+ struct efc_dma xfer_rdy;
+ u16 type;
+ bool xbusy;
+ int abort_in_progress;
+ bool status_saved;
+ u8 wq_class;
+ u16 reqtag;
+
+ struct hw_wq *wq;
+ efct_hw_done_t done;
+ void *arg;
+ efct_hw_done_t abort_done;
+ void *abort_arg;
+
+ enum efct_hw_wq_steering wq_steering;
+
+ u32 saved_status;
+ u32 saved_len;
+ u32 saved_ext;
+
+ struct hw_eq *eq;
+ u32 sge_offset;
+ u32 def_sgl_count;
+ u32 abort_reqtag;
+ u32 indicator;
+ struct efc_dma def_sgl;
+ struct efc_dma *sgl;
+ u32 sgl_count;
+ u32 first_data_sge;
+ u32 n_sge;
+};
+
+enum efct_hw_port {
+ EFCT_HW_PORT_INIT,
+ EFCT_HW_PORT_SHUTDOWN,
+};
+
+/* Node group rpi reference */
+struct efct_hw_rpi_ref {
+ atomic_t rpi_count;
+ atomic_t rpi_attached;
+};
+
+enum efct_hw_link_stat {
+ EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
+ EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
+ EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
+ EFCT_HW_LINK_STAT_CRC_COUNT,
+ EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
+ EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
+ EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
+ EFCT_HW_LINK_STAT_RCV_EOFA_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_SOFF_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
+ EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
+ EFCT_HW_LINK_STAT_MAX,
+};
+
+enum efct_hw_host_stat {
+ EFCT_HW_HOST_STAT_TX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_RX_KBYTE_COUNT,
+ EFCT_HW_HOST_STAT_TX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_RX_FRAME_COUNT,
+ EFCT_HW_HOST_STAT_TX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_RX_SEQ_COUNT,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG,
+ EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP,
+ EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT,
+ EFCT_HW_HOST_STAT_RX_F_BSY_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
+ EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
+ EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
+ EFCT_HW_HOST_STAT_MAX,
+};
+
+enum efct_hw_state {
+ EFCT_HW_STATE_UNINITIALIZED,
+ EFCT_HW_STATE_QUEUES_ALLOCATED,
+ EFCT_HW_STATE_ACTIVE,
+ EFCT_HW_STATE_RESET_IN_PROGRESS,
+ EFCT_HW_STATE_TEARDOWN_IN_PROGRESS,
+};
+
+struct efct_hw_link_stat_counts {
+ u8 overflow;
+ u32 counter;
+};
+
+struct efct_hw_host_stat_counts {
+ u32 counter;
+};
+
+/* Structure used for the hash lookup of queue IDs */
+struct efct_queue_hash {
+ bool in_use;
+ u16 id;
+ u16 index;
+};
+
+/* WQ callback object */
+struct hw_wq_callback {
+ u16 instance_index; /* use for request tag */
+ void (*callback)(void *arg, u8 *cqe, int status);
+ void *arg;
+ struct list_head list_entry;
+};
+
+struct reqtag_pool {
+ spinlock_t lock; /* pool lock */
+ struct hw_wq_callback *tags[U16_MAX];
+ struct list_head freelist;
+};
+
+struct efct_hw_config {
+ u32 n_eq;
+ u32 n_cq;
+ u32 n_mq;
+ u32 n_rq;
+ u32 n_wq;
+ u32 n_io;
+ u32 n_sgl;
+ u32 speed;
+ u32 topology;
+ /* size of the buffers for first burst */
+ u32 rq_default_buffer_size;
+ u8 esoc;
+ /* MRQ RQ selection policy */
+ u8 rq_selection_policy;
+ /* RQ quanta if rq_selection_policy == 2 */
+ u8 rr_quanta;
+ u32 filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+};
+
+struct efct_hw {
+ struct efct *os;
+ struct sli4 sli;
+ u16 ulp_start;
+ u16 ulp_max;
+ u32 dump_size;
+ enum efct_hw_state state;
+ bool hw_setup_called;
+ u8 sliport_healthcheck;
+ u16 fcf_indicator;
+
+ /* HW configuration */
+ struct efct_hw_config config;
+
+ /* calculated queue sizes for each type */
+ u32 num_qentries[SLI4_QTYPE_MAX];
+
+ /* Storage for SLI queue objects */
+ struct sli4_queue wq[EFCT_HW_MAX_NUM_WQ];
+ struct sli4_queue rq[EFCT_HW_MAX_NUM_RQ];
+ u16 hw_rq_lookup[EFCT_HW_MAX_NUM_RQ];
+ struct sli4_queue mq[EFCT_HW_MAX_NUM_MQ];
+ struct sli4_queue cq[EFCT_HW_MAX_NUM_CQ];
+ struct sli4_queue eq[EFCT_HW_MAX_NUM_EQ];
+
+ /* HW queue */
+ u32 eq_count;
+ u32 cq_count;
+ u32 mq_count;
+ u32 wq_count;
+ u32 rq_count;
+ u32 cmd_head_count;
+ struct list_head eq_list;
+
+ struct efct_queue_hash cq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash rq_hash[EFCT_HW_Q_HASH_SIZE];
+ struct efct_queue_hash wq_hash[EFCT_HW_Q_HASH_SIZE];
+
+ /* Storage for HW queue objects */
+ struct hw_wq *hw_wq[EFCT_HW_MAX_NUM_WQ];
+ struct hw_rq *hw_rq[EFCT_HW_MAX_NUM_RQ];
+ struct hw_mq *hw_mq[EFCT_HW_MAX_NUM_MQ];
+ struct hw_cq *hw_cq[EFCT_HW_MAX_NUM_CQ];
+ struct hw_eq *hw_eq[EFCT_HW_MAX_NUM_EQ];
+ /* count of hw_rq[] entries */
+ u32 hw_rq_count;
+ /* count of multirq RQs */
+ u32 hw_mrq_count;
+
+ struct hw_wq **wq_cpu_array;
+
+ /* Sequence objects used in incoming frame processing */
+ struct efc_hw_sequence *seq_pool;
+
+ /* Maintain an ordered, linked list of outstanding HW commands. */
+ struct mutex bmbx_lock;
+ spinlock_t cmd_lock;
+ struct list_head cmd_head;
+ struct list_head cmd_pending;
+ mempool_t *cmd_ctx_pool;
+ mempool_t *mbox_rqst_pool;
+
+ struct sli4_link_event link;
+
+ /* pointer array of IO objects */
+ struct efct_hw_io **io;
+ /* array of WQE buffs mapped to IO objects */
+ u8 *wqe_buffs;
+
+ /* IO lock to synchronize list access */
+ spinlock_t io_lock;
+ /* List of IO objects in use */
+ struct list_head io_inuse;
+ /* List of IO objects waiting to be freed */
+ struct list_head io_wait_free;
+ /* List of IO objects available for allocation */
+ struct list_head io_free;
+
+ struct efc_dma loop_map;
+
+ struct efc_dma xfer_rdy;
+
+ struct efc_dma rnode_mem;
+
+ atomic_t io_alloc_failed_count;
+
+ /* stat: wq submit count */
+ u32 tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ];
+ /* stat: wq complete count */
+ u32 tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ];
+
+ atomic_t send_frame_seq_id;
+ struct reqtag_pool *wq_reqtag_pool;
+};
+
+enum efct_hw_io_count_type {
+ EFCT_HW_IO_INUSE_COUNT,
+ EFCT_HW_IO_FREE_COUNT,
+ EFCT_HW_IO_WAIT_FREE_COUNT,
+ EFCT_HW_IO_N_TOTAL_IO_COUNT,
+};
+
+/* HW queue data structures */
+struct hw_eq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct efct_hw *hw;
+ struct sli4_queue *queue;
+ struct list_head cq_list;
+ u32 use_count;
+};
+
+struct hw_cq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_eq *eq;
+ struct sli4_queue *queue;
+ struct list_head q_list;
+ u32 use_count;
+};
+
+struct hw_q {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+};
+
+struct hw_mq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+
+ u32 use_count;
+};
+
+struct hw_wq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+ struct efct_hw *hw;
+
+ u32 entry_count;
+ u32 entry_size;
+ struct hw_cq *cq;
+ struct sli4_queue *queue;
+ u32 class;
+
+ /* WQ consumed */
+ u32 wqec_set_count;
+ u32 wqec_count;
+ u32 free_count;
+ u32 total_submit_count;
+ struct list_head pending_list;
+
+ /* HW IO allocated for use with Send Frame */
+ struct efct_hw_io *send_frame_io;
+
+ /* Stats */
+ u32 use_count;
+ u32 wq_pending_count;
+};
+
+struct hw_rq {
+ struct list_head list_entry;
+ enum sli4_qtype type;
+ u32 instance;
+
+ u32 entry_count;
+ u32 use_count;
+ u32 hdr_entry_size;
+ u32 first_burst_entry_size;
+ u32 data_entry_size;
+ bool is_mrq;
+ u32 base_mrq_id;
+
+ struct hw_cq *cq;
+
+ u8 filter_mask;
+ struct sli4_queue *hdr;
+ struct sli4_queue *first_burst;
+ struct sli4_queue *data;
+
+ struct efc_hw_rq_buffer *hdr_buf;
+ struct efc_hw_rq_buffer *fb_buf;
+ struct efc_hw_rq_buffer *payload_buf;
+ /* RQ tracker for this RQ */
+ struct efc_hw_sequence **rq_tracker;
+};
+
+struct efct_hw_send_frame_context {
+ struct efct_hw *hw;
+ struct hw_wq_callback *wqcb;
+ struct efct_hw_wqe wqe;
+ void (*callback)(int status, void *arg);
+ void *arg;
+
+ /* General purpose elements */
+ struct efc_hw_sequence *seq;
+ struct efc_dma payload;
+};
+
+struct efct_hw_grp_hdr {
+ u32 size;
+ __be32 magic_number;
+ u32 word2;
+ u8 rev_name[128];
+ u8 date[12];
+ u8 revision[32];
+};
+
+static inline int
+efct_hw_get_link_speed(struct efct_hw *hw)
+{
+ return hw->link.speed;
+}
+
+int
+efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev);
+int efct_hw_init(struct efct_hw *hw);
+int
+efct_hw_parse_filter(struct efct_hw *hw, void *value);
+int
+efct_hw_init_queues(struct efct_hw *hw);
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw);
+uint64_t
+efct_get_wwnn(struct efct_hw *hw);
+uint64_t
+efct_get_wwpn(struct efct_hw *hw);
+
+int efct_hw_rx_allocate(struct efct_hw *hw);
+int efct_hw_rx_post(struct efct_hw *hw);
+void efct_hw_rx_free(struct efct_hw *hw);
+int
+efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
+ void *arg);
+int
+efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg);
+
+struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw);
+int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io);
+u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io);
+int
+efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
+ struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
+ void *cb, void *arg);
+int
+efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
+ struct efc_dma *sgl,
+ u32 sgl_count);
+int
+efct_hw_io_init_sges(struct efct_hw *hw,
+ struct efct_hw_io *io, enum efct_hw_io_type type);
+
+int
+efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
+ uintptr_t addr, u32 length);
+int
+efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
+ bool send_abts, void *cb, void *arg);
+u32
+efct_hw_io_get_count(struct efct_hw *hw,
+ enum efct_hw_io_count_type io_count_type);
+struct efct_hw_io
+*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator);
+void efct_hw_io_abort_all(struct efct_hw *hw);
+void efct_hw_io_free_internal(struct kref *arg);
+
+/* HW WQ request tag API */
+struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw);
+void efct_hw_reqtag_pool_free(struct efct_hw *hw);
+struct hw_wq_callback
+*efct_hw_reqtag_alloc(struct efct_hw *hw,
+ void (*callback)(void *arg, u8 *cqe,
+ int status), void *arg);
+void
+efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb);
+struct hw_wq_callback
+*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index);
+
+/* RQ completion handlers for RQ pair mode */
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw,
+ struct hw_cq *cq, u8 *cqe);
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq);
+static inline void
+efct_hw_sequence_copy(struct efc_hw_sequence *dst,
+ struct efc_hw_sequence *src)
+{
+ /* Copy src to dst */
+ *dst = *src;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq);
+
+static inline int
+efct_hw_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ /* Only RQ pair mode is supported */
+ return efct_hw_rqpair_sequence_free(hw, seq);
+}
+
+int
+efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
+ u32 max_isr_time_msec);
+void efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq);
+void
+efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, int status, u16 rid);
+void
+efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe, u16 rid);
+int
+efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec);
+int
+efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id);
+int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
+int
+efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
+ u8 sof, u8 eof, struct efc_dma *payload,
+ struct efct_hw_send_frame_context *ctx,
+ void (*callback)(void *arg, u8 *cqe, int status),
+ void *arg);
+int
+efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io);
+int
+efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls);
+int
+efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
+ void *cb, void *arg);
+
+/* Function for retrieving link statistics */
+int
+efct_hw_get_link_stats(struct efct_hw *hw,
+ u8 req_ext_counters,
+ u8 clear_overflow_flags,
+ u8 clear_all_counters,
+ void (*efct_hw_link_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg),
+ void *arg);
+/* Function for retrieving host statistics */
+int
+efct_hw_get_host_stats(struct efct_hw *hw,
+ u8 cc,
+ void (*efct_hw_host_stat_cb_t)(int status,
+ u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg),
+ void *arg);
+int
+efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma,
+ u32 size, u32 offset, int last,
+ void (*cb)(int status, u32 bytes_written,
+ u32 change_status, void *arg),
+ void *arg);
+typedef void (*efct_hw_async_cb_t)(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg);
+int
+efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg);
+
+struct hw_eq *efct_hw_new_eq(struct efct_hw *hw, u32 entry_count);
+struct hw_cq *efct_hw_new_cq(struct hw_eq *eq, u32 entry_count);
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count);
+struct hw_mq *efct_hw_new_mq(struct hw_cq *cq, u32 entry_count);
+struct hw_wq
+*efct_hw_new_wq(struct hw_cq *cq, u32 entry_count);
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count);
+void efct_hw_del_eq(struct hw_eq *eq);
+void efct_hw_del_cq(struct hw_cq *cq);
+void efct_hw_del_mq(struct hw_mq *mq);
+void efct_hw_del_wq(struct hw_wq *wq);
+void efct_hw_del_rq(struct hw_rq *rq);
+void efct_hw_queue_teardown(struct efct_hw *hw);
+void efct_hw_teardown(struct efct_hw *hw);
+int
+efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset);
+
+int
+efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
+ uintptr_t value,
+ void (*cb)(int status, uintptr_t value, void *arg),
+ void *arg);
+
+#endif /* __EFCT_H__ */
diff --git a/drivers/scsi/elx/efct/efct_hw_queues.c b/drivers/scsi/elx/efct/efct_hw_queues.c
new file mode 100644
index 000000000000..3a1d1a5864a3
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_hw_queues.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_unsol.h"
+
+int
+efct_hw_init_queues(struct efct_hw *hw)
+{
+ struct hw_eq *eq = NULL;
+ struct hw_cq *cq = NULL;
+ struct hw_wq *wq = NULL;
+ struct hw_mq *mq = NULL;
+
+ struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
+ struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
+ u32 i = 0, j;
+
+ hw->eq_count = 0;
+ hw->cq_count = 0;
+ hw->mq_count = 0;
+ hw->wq_count = 0;
+ hw->rq_count = 0;
+ hw->hw_rq_count = 0;
+ INIT_LIST_HEAD(&hw->eq_list);
+
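+ /*
+ * Create one EQ per configured vector; each EQ gets a dedicated
+ * CQ/WQ pair, and the first EQ also carries the CQ/MQ pair.
+ */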
+ for (i = 0; i < hw->config.n_eq; i++) {
+ /* Create EQ */
+ eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
+ if (!eq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ eqs[i] = eq;
+
+ /* Create a CQ and the single MQ on the first EQ */
+ if (!i) {
+ cq = efct_hw_new_cq(eq,
+ hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
+ if (!mq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create WQ */
+ cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
+ if (!cq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+
+ wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
+ if (!wq) {
+ efct_hw_queue_teardown(hw);
+ return -ENOMEM;
+ }
+ }
+
+ /* Create CQ set */
+ if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
+ /* Create RQ set */
+ if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
+ efct_hw_queue_teardown(hw);
+ return -EIO;
+ }
+
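+ /*
+ * Mark every RQ pair as part of one MRQ set, anchored at the
+ * first pair's header RQ id.
+ */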
+ for (j = 0; j < i; j++) {
+ rqs[j]->filter_mask = 0;
+ rqs[j]->is_mrq = true;
+ rqs[j]->base_mrq_id = rqs[0]->hdr->id;
+ }
+
+ hw->hw_mrq_count = i;
+
+ return 0;
+}
+
+int
+efct_hw_map_wq_cpu(struct efct_hw *hw)
+{
+ struct efct *efct = hw->os;
+ u32 cpu = 0, i;
+
+ /* Init cpu_map array */
+ hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
+ GFP_KERNEL);
+ if (!hw->wq_cpu_array)
+ return -ENOMEM;
+
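+ /*
+ * Map each present CPU in a vector's affinity mask to that
+ * vector's WQ.
+ */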
+ for (i = 0; i < hw->config.n_eq; i++) {
+ const struct cpumask *maskp;
+
+ /* Get a CPU mask for all CPUs affinitized to this vector */
+ maskp = pci_irq_get_affinity(efct->pci, i);
+ if (!maskp) {
+ efc_log_debug(efct, "maskp null for vector:%d\n", i);
+ continue;
+ }
+
+ /* Loop through all CPUs associated with vector idx */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
+ hw->wq_cpu_array[cpu] = hw->hw_wq[i];
+ }
+ }
+
+ return 0;
+}
+
+struct hw_eq *
+efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
+{
+ struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+
+ if (!eq)
+ return NULL;
+
+ eq->type = SLI4_QTYPE_EQ;
+ eq->hw = hw;
+ eq->entry_count = entry_count;
+ eq->instance = hw->eq_count++;
+ eq->queue = &hw->eq[eq->instance];
+ INIT_LIST_HEAD(&eq->cq_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
+ NULL)) {
+ efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
+ kfree(eq);
+ return NULL;
+ }
+
+ sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
+ hw->hw_eq[eq->instance] = eq;
+ INIT_LIST_HEAD(&eq->list_entry);
+ list_add_tail(&eq->list_entry, &hw->eq_list);
+ efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
+ eq->queue->id, eq->entry_count);
+ return eq;
+}
+
+struct hw_cq *
+efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
+{
+ struct efct_hw *hw = eq->hw;
+ struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+
+ if (!cq)
+ return NULL;
+
+ cq->eq = eq;
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = eq->hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+
+ INIT_LIST_HEAD(&cq->q_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
+ cq->entry_count, eq->queue)) {
+ efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
+ cq->instance, cq->entry_count);
+ kfree(cq);
+ return NULL;
+ }
+
+ hw->hw_cq[cq->instance] = cq;
+ INIT_LIST_HEAD(&cq->list_entry);
+ list_add_tail(&cq->list_entry, &eq->cq_list);
+ efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
+ cq->queue->id, cq->entry_count);
+ return cq;
+}
+
+u32
+efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
+ u32 num_cqs, u32 entry_count)
+{
+ u32 i;
+ struct efct_hw *hw = eqs[0]->hw;
+ struct sli4 *sli4 = &hw->sli;
+ struct hw_cq *cq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
+ struct sli4_queue *assefct[SLI4_MAX_CQ_SET_COUNT];
+
+ /* Initialise CQS pointers to NULL */
+ for (i = 0; i < num_cqs; i++)
+ cqs[i] = NULL;
+
+ for (i = 0; i < num_cqs; i++) {
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ goto error;
+
+ cqs[i] = cq;
+ cq->eq = eqs[i];
+ cq->type = SLI4_QTYPE_CQ;
+ cq->instance = hw->cq_count++;
+ cq->entry_count = entry_count;
+ cq->queue = &hw->cq[cq->instance];
+ qs[i] = cq->queue;
+ assefct[i] = eqs[i]->queue;
+ INIT_LIST_HEAD(&cq->q_list);
+ }
+
+ if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) {
+ efc_log_err(hw->os, "Failed to create CQ Set.\n");
+ goto error;
+ }
+
+ for (i = 0; i < num_cqs; i++) {
+ hw->hw_cq[cqs[i]->instance] = cqs[i];
+ INIT_LIST_HEAD(&cqs[i]->list_entry);
+ list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++) {
+ kfree(cqs[i]);
+ cqs[i] = NULL;
+ }
+ return -EIO;
+}
+
+struct hw_mq *
+efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ mq->cq = cq;
+ mq->type = SLI4_QTYPE_MQ;
+ mq->instance = cq->eq->hw->mq_count++;
+ mq->entry_count = entry_count;
+ mq->entry_size = EFCT_HW_MQ_DEPTH;
+ mq->queue = &hw->mq[mq->instance];
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
+ cq->queue)) {
+ efc_log_err(hw->os, "MQ allocation failure\n");
+ kfree(mq);
+ return NULL;
+ }
+
+ hw->hw_mq[mq->instance] = mq;
+ INIT_LIST_HEAD(&mq->list_entry);
+ list_add_tail(&mq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
+ mq->queue->id, mq->entry_count);
+ return mq;
+}
+
+struct hw_wq *
+efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
+{
+ struct efct_hw *hw = cq->eq->hw;
+ struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+
+ if (!wq)
+ return NULL;
+
+ wq->hw = cq->eq->hw;
+ wq->cq = cq;
+ wq->type = SLI4_QTYPE_WQ;
+ wq->instance = cq->eq->hw->wq_count++;
+ wq->entry_count = entry_count;
+ wq->queue = &hw->wq[wq->instance];
+ wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
+ wq->wqec_count = wq->wqec_set_count;
+ wq->free_count = wq->entry_count - 1;
+ INIT_LIST_HEAD(&wq->pending_list);
+
+ if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
+ wq->entry_count, cq->queue)) {
+ efc_log_err(hw->os, "WQ allocation failure\n");
+ kfree(wq);
+ return NULL;
+ }
+
+ hw->hw_wq[wq->instance] = wq;
+ INIT_LIST_HEAD(&wq->list_entry);
+ list_add_tail(&wq->list_entry, &cq->q_list);
+ efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
+ wq->instance, wq->queue->id, wq->entry_count, wq->class);
+ return wq;
+}
+
+u32
+efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
+ u32 num_rq_pairs, u32 entry_count)
+{
+ struct efct_hw *hw = cqs[0]->eq->hw;
+ struct hw_rq *rq = NULL;
+ struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
+ u32 i, q_count, size;
+
+ /* Initialise RQS pointers */
+ for (i = 0; i < num_rq_pairs; i++)
+ rqs[i] = NULL;
+
+ /*
+ * Allocate an RQ object SET, where each element in set
+ * encapsulates 2 SLI queues (for rq pair)
+ */
+ for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ goto error;
+
+ rqs[i] = rq;
+ rq->instance = hw->hw_rq_count++;
+ rq->cq = cqs[i];
+ rq->type = SLI4_QTYPE_RQ;
+ rq->entry_count = entry_count;
+
+ /* Header RQ */
+ rq->hdr = &hw->rq[hw->rq_count];
+ rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count] = rq->hdr;
+
+ /* Data RQ */
+ rq->data = &hw->rq[hw->rq_count];
+ rq->data_entry_size = hw->config.rq_default_buffer_size;
+ hw->hw_rq_lookup[hw->rq_count] = rq->instance;
+ hw->rq_count++;
+ qs[q_count + 1] = rq->data;
+
+ rq->rq_tracker = NULL;
+ }
+
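+ /*
+ * Create the SLI RQ set in a single call; the first pair's CQ id
+ * and entry sizes apply to the whole set.
+ */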
+ if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
+ cqs[0]->queue->id,
+ rqs[0]->entry_count,
+ rqs[0]->hdr_entry_size,
+ rqs[0]->data_entry_size)) {
+ efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
+ cqs[0]->queue->id);
+ goto error;
+ }
+
+ for (i = 0; i < num_rq_pairs; i++) {
+ hw->hw_rq[rqs[i]->instance] = rqs[i];
+ INIT_LIST_HEAD(&rqs[i]->list_entry);
+ list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
+ size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
+ rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
+ if (!rqs[i]->rq_tracker)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rq_pairs; i++) {
+ if (rqs[i]) {
+ kfree(rqs[i]->rq_tracker);
+ kfree(rqs[i]);
+ }
+ }
+
+ return -EIO;
+}
+
+void
+efct_hw_del_eq(struct hw_eq *eq)
+{
+ struct hw_cq *cq;
+ struct hw_cq *cq_next;
+
+ if (!eq)
+ return;
+
+ list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
+ efct_hw_del_cq(cq);
+ list_del(&eq->list_entry);
+ eq->hw->hw_eq[eq->instance] = NULL;
+ kfree(eq);
+}
+
+void
+efct_hw_del_cq(struct hw_cq *cq)
+{
+ struct hw_q *q;
+ struct hw_q *q_next;
+
+ if (!cq)
+ return;
+
+ list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
+ switch (q->type) {
+ case SLI4_QTYPE_MQ:
+ efct_hw_del_mq((struct hw_mq *)q);
+ break;
+ case SLI4_QTYPE_WQ:
+ efct_hw_del_wq((struct hw_wq *)q);
+ break;
+ case SLI4_QTYPE_RQ:
+ efct_hw_del_rq((struct hw_rq *)q);
+ break;
+ default:
+ break;
+ }
+ }
+ list_del(&cq->list_entry);
+ cq->eq->hw->hw_cq[cq->instance] = NULL;
+ kfree(cq);
+}
+
+void
+efct_hw_del_mq(struct hw_mq *mq)
+{
+ if (!mq)
+ return;
+
+ list_del(&mq->list_entry);
+ mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
+ kfree(mq);
+}
+
+void
+efct_hw_del_wq(struct hw_wq *wq)
+{
+ if (!wq)
+ return;
+
+ list_del(&wq->list_entry);
+ wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
+ kfree(wq);
+}
+
+void
+efct_hw_del_rq(struct hw_rq *rq)
+{
+ struct efct_hw *hw = NULL;
+
+ if (!rq)
+ return;
+ /* Free RQ tracker */
+ kfree(rq->rq_tracker);
+ rq->rq_tracker = NULL;
+ list_del(&rq->list_entry);
+ hw = rq->cq->eq->hw;
+ hw->hw_rq[rq->instance] = NULL;
+ kfree(rq);
+}
+
+void
+efct_hw_queue_teardown(struct efct_hw *hw)
+{
+ struct hw_eq *eq;
+ struct hw_eq *eq_next;
+
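+ /*
+ * eq_list is set up in efct_hw_init_queues(); nothing to tear
+ * down if the queues were never created.
+ */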
+ if (!hw->eq_list.next)
+ return;
+
+ list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
+ efct_hw_del_eq(eq);
+}
+
+static inline int
+efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
+{
+ return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
+}
+
+static struct efc_hw_sequence *
+efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[rqindex];
+ struct efc_hw_sequence *seq = NULL;
+ struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ unsigned long flags = 0;
+
+ if (bufindex >= rq_hdr->length) {
+ efc_log_err(hw->os,
+ "RQidx %d bufidx %d exceed ring len %d for id %d\n",
+ rqindex, bufindex, rq_hdr->length, rq_hdr->id);
+ return NULL;
+ }
+
+ /* rq_hdr lock also covers rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ seq = rq->rq_tracker[bufindex];
+ rq->rq_tracker[bufindex] = NULL;
+
+ if (!seq) {
+ efc_log_err(hw->os,
+ "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
+ rqindex, bufindex, rq_hdr->index);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return seq;
+}
+
+int
+efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
+ u8 *cqe)
+{
+ u16 rq_id;
+ u32 index;
+ int rqindex;
+ int rq_status;
+ u32 h_len;
+ u32 p_len;
+ struct efc_hw_sequence *seq;
+ struct hw_rq *rq;
+
+ rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
+ &rq_id, &index);
+ if (rq_status != 0) {
+ switch (rq_status) {
+ case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
+ case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
+ /* just get RQ buffer then return to chip */
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os,
+ "status=%#x: lookup fail id=%#x\n",
+ rq_status, rq_id);
+ break;
+ }
+
+ /* get RQ buffer */
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+
+ /* return to chip */
+ if (efct_hw_rqpair_sequence_free(hw, seq)) {
+ efc_log_debug(hw->os,
+ "status=%#x,fail rtrn buf to RQ\n",
+ rq_status);
+ break;
+ }
+ break;
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
+ case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
+ /*
+ * since RQ buffers were not consumed, cannot return
+ * them to chip
+ */
+ efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
+ rq_status);
+ fallthrough;
+ default:
+ break;
+ }
+ return -EIO;
+ }
+
+ rqindex = efct_hw_rqpair_find(hw, rq_id);
+ if (rqindex < 0) {
+ efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
+ rq_id);
+ return -EIO;
+ }
+
+ rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
+ rq->use_count++;
+
+ seq = efct_hw_rqpair_get(hw, rqindex, index);
+ if (WARN_ON(!seq))
+ return -EIO;
+
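+ /*
+ * Fill in the sequence lengths, FCFI and originating EQ, then
+ * hand the sequence to the unsolicited frame handler.
+ */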
+ seq->hw = hw;
+
+ sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
+ seq->header->dma.len = h_len;
+ seq->payload->dma.len = p_len;
+ seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
+ seq->hw_priv = cq->eq;
+
+ efct_unsolicited_cb(hw->os, seq);
+
+ return 0;
+}
+
+static int
+efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
+ struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
+ u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
+ struct hw_rq *rq = hw->hw_rq[hw_rq_index];
+ u32 phys_hdr[2];
+ u32 phys_payload[2];
+ int qindex_hdr;
+ int qindex_payload;
+ unsigned long flags = 0;
+
+ /* Update the RQ verification lookup tables */
+ phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
+ phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
+ phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
+ phys_payload[1] = lower_32_bits(seq->payload->dma.phys);
+
+ /* rq_hdr lock also covers payload / header->rqindex+1 queue */
+ spin_lock_irqsave(&rq_hdr->lock, flags);
+
+ /*
+ * Note: The header must be posted last for buffer pair mode because
+ * posting on the header queue posts the payload queue as well.
+ * We do not ring the payload queue independently in RQ pair mode.
+ */
+ qindex_payload = sli_rq_write(&hw->sli, rq_payload,
+ (void *)phys_payload);
+ qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
+ if (qindex_hdr < 0 ||
+ qindex_payload < 0) {
+ efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return -EIO;
+ }
+
+ /* ensure the indexes are the same */
+ WARN_ON(qindex_hdr != qindex_payload);
+
+ /* Update the lookup table */
+ if (!rq->rq_tracker[qindex_hdr]) {
+ rq->rq_tracker[qindex_hdr] = seq;
+ } else {
+ efc_log_debug(hw->os,
+ "expected rq_tracker[%d][%d] buffer to be NULL\n",
+ hw_rq_index, qindex_hdr);
+ }
+
+ spin_unlock_irqrestore(&rq_hdr->lock, flags);
+ return 0;
+}
+
+int
+efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
+{
+ int rc = 0;
+
+ /*
+ * Post the data buffer first: in RQ pair mode, ringing the
+ * doorbell of the header ring posts the data buffer as well.
+ */
+ if (efct_hw_rqpair_put(hw, seq)) {
+ efc_log_err(hw->os, "error writing buffers\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+int
+efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = efc->base;
+
+ return efct_hw_rqpair_sequence_free(&efct->hw, seq);
+}
diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c
new file mode 100644
index 000000000000..71e21655916a
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+#include "efct_io.h"
+
+struct efct_io_pool {
+ struct efct *efct;
+ spinlock_t lock; /* IO pool lock */
+ u32 io_num_ios; /* Total IOs allocated */
+ struct efct_io *ios[EFCT_NUM_SCSI_IOS];
+ struct list_head freelist;
+};
+
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl)
+{
+ u32 i = 0;
+ struct efct_io_pool *io_pool;
+ struct efct_io *io;
+
+ /* Allocate the IO pool */
+ io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
+ if (!io_pool)
+ return NULL;
+
+ io_pool->efct = efct;
+ INIT_LIST_HEAD(&io_pool->freelist);
+ /* initialize IO pool lock */
+ spin_lock_init(&io_pool->lock);
+
+ for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io)
+ break;
+
+ io_pool->io_num_ios++;
+ io_pool->ios[i] = io;
+ io->tag = i;
+ io->instance_index = i;
+
+ /* Allocate a response buffer */
+ io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
+ io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
+ io->rspbuf.size,
+ &io->rspbuf.phys, GFP_DMA);
+ if (!io->rspbuf.virt) {
+ efc_log_err(efct, "dma_alloc rspbuf failed\n");
+ efct_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ /* Allocate SGL */
+ io->sgl = kcalloc(num_sgl, sizeof(*io->sgl), GFP_KERNEL);
+ if (!io->sgl) {
+ efct_io_pool_free(io_pool);
+ return NULL;
+ }
+
+ io->sgl_allocated = num_sgl;
+ io->sgl_count = 0;
+
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add_tail(&io->list_entry, &io_pool->freelist);
+ }
+
+ return io_pool;
+}
+
+int
+efct_io_pool_free(struct efct_io_pool *io_pool)
+{
+ struct efct *efct;
+ u32 i;
+ struct efct_io *io;
+
+ if (io_pool) {
+ efct = io_pool->efct;
+
+ for (i = 0; i < io_pool->io_num_ios; i++) {
+ io = io_pool->ios[i];
+ if (!io)
+ continue;
+
+ kfree(io->sgl);
+ dma_free_coherent(&efct->pci->dev,
+ io->rspbuf.size, io->rspbuf.virt,
+ io->rspbuf.phys);
+ memset(&io->rspbuf, 0, sizeof(struct efc_dma));
+ }
+
+ kfree(io_pool);
+ efct->xport->io_pool = NULL;
+ }
+
+ return 0;
+}
+
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
+{
+ struct efct_io *io = NULL;
+ struct efct *efct;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+
+ if (!list_empty(&io_pool->freelist)) {
+ io = list_first_entry(&io_pool->freelist, struct efct_io,
+ list_entry);
+ list_del_init(&io->list_entry);
+ }
+
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (!io)
+ return NULL;
+
+ io->io_type = EFCT_IO_TYPE_MAX;
+ io->hio_type = EFCT_HW_IO_MAX;
+ io->hio = NULL;
+ io->transferred = 0;
+ io->efct = efct;
+ io->timeout = 0;
+ io->sgl_count = 0;
+ io->tgt_task_tag = 0;
+ io->init_task_tag = 0;
+ io->hw_tag = 0;
+ io->display_name = "pending";
+ io->seq_init = 0;
+ io->io_free = 0;
+ io->release = NULL;
+ atomic_add_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_alloc);
+ return io;
+}
+
+/* Free an object used to track an IO */
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
+{
+ struct efct *efct;
+ struct efct_hw_io *hio = NULL;
+ unsigned long flags = 0;
+
+ efct = io_pool->efct;
+
+ spin_lock_irqsave(&io_pool->lock, flags);
+ hio = io->hio;
+ io->hio = NULL;
+ io->io_free = 1;
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add(&io->list_entry, &io_pool->freelist);
+ spin_unlock_irqrestore(&io_pool->lock, flags);
+
+ if (hio)
+ efct_hw_io_free(&efct->hw, hio);
+
+ atomic_sub_return(1, &efct->xport->io_active_count);
+ atomic_add_return(1, &efct->xport->io_total_free);
+}
+
+/* Find an I/O given its node and ox_id */
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id)
+{
+ struct efct_io *io = NULL;
+ unsigned long flags = 0;
+ u8 found = false;
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
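+ /*
+ * Match on OX_ID (init_task_tag); an rx_id of 0xffff acts as a
+ * wildcard. Return the IO only if a reference can still be taken.
+ */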
+ list_for_each_entry(io, &node->active_ios, list_entry) {
+ if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
+ (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
+ if (kref_get_unless_zero(&io->ref))
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ return found ? io : NULL;
+}
diff --git a/drivers/scsi/elx/efct/efct_io.h b/drivers/scsi/elx/efct/efct_io.h
new file mode 100644
index 000000000000..bb0f51811a7c
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_io.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_IO_H__)
+#define __EFCT_IO_H__
+
+#include "efct_lio.h"
+
+#define EFCT_LOG_ENABLE_IO_ERRORS(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 6)) != 0) : 0)
+
+#define io_error_log(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_IO_ERRORS(io->efct)) \
+ efc_log_warn(io->efct, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define SCSI_CMD_BUF_LENGTH 48
+#define SCSI_RSP_BUF_LENGTH (FCP_RESP_WITH_EXT + SCSI_SENSE_BUFFERSIZE)
+#define EFCT_NUM_SCSI_IOS 8192
+
+enum efct_io_type {
+ EFCT_IO_TYPE_IO = 0,
+ EFCT_IO_TYPE_ELS,
+ EFCT_IO_TYPE_CT,
+ EFCT_IO_TYPE_CT_RESP,
+ EFCT_IO_TYPE_BLS_RESP,
+ EFCT_IO_TYPE_ABORT,
+
+ EFCT_IO_TYPE_MAX,
+};
+
+enum efct_els_state {
+ EFCT_ELS_REQUEST = 0,
+ EFCT_ELS_REQUEST_DELAYED,
+ EFCT_ELS_REQUEST_DELAY_ABORT,
+ EFCT_ELS_REQ_ABORT,
+ EFCT_ELS_REQ_ABORTED,
+ EFCT_ELS_ABORT_IO_COMPL,
+};
+
+/**
+ * SCSI target IO object
+ * @efct: pointer back to efct
+ * @instance_index: unique instance index value
+ * @display_name: IO display name
+ * @node: pointer to node
+ * @list_entry: io list entry
+ * @io_pending_link: io pending list entry
+ * @ref: reference counter
+ * @release: release callback function
+ * @init_task_tag: initiator task tag (OX_ID) for back-end and SCSI logging
+ * @tgt_task_tag: target task tag (RX_ID) for back-end and SCSI logging
+ * @hw_tag: HW layer unique IO id
+ * @tag: unique IO identifier
+ * @sgl: SGL
+ * @sgl_allocated: Number of allocated SGEs
+ * @sgl_count: Number of SGEs in this SGL
+ * @tgt_io: backend target private IO data
+ * @exp_xfer_len: expected data transfer length, based on FC header
+ * @hw_priv: Declarations private to HW/SLI
+ * @io_type: indicates what this struct efct_io structure is used for
+ * @hio: hw io object
+ * @transferred: Number of bytes transferred
+ * @auto_resp: set if auto_trsp was set
+ * @low_latency: set if low latency request
+ * @wq_steering: selected WQ steering request
+ * @wq_class: selected WQ class if steering is class
+ * @xfer_req: transfer size for current request
+ * @scsi_tgt_cb: target callback function
+ * @scsi_tgt_cb_arg: target callback function argument
+ * @abort_cb: abort callback function
+ * @abort_cb_arg: abort callback function argument
+ * @bls_cb: BLS callback function
+ * @bls_cb_arg: BLS callback function argument
+ * @tmf_cmd: TMF command being processed
+ * @abort_rx_id: rx_id from the ABTS that initiated the command abort
+ * @cmd_tgt: True if this is a Target command
+ * @send_abts: when aborting, indicates ABTS is to be sent
+ * @cmd_ini: True if this is an Initiator command
+ * @seq_init: True if local node has sequence initiative
+ * @iparam: iparams for hw io send call
+ * @hio_type: HW IO type
+ * @wire_len: wire length
+ * @hw_cb: saved HW callback
+ * @io_to_abort: for abort handling, pointer to IO to abort
+ * @rspbuf: SCSI Response buffer
+ * @timeout: Timeout value in seconds for this IO
+ * @cs_ctl: CS_CTL priority for this IO
+ * @io_free: Is io object in freelist
+ * @app_id: application id
+ */
+struct efct_io {
+ struct efct *efct;
+ u32 instance_index;
+ const char *display_name;
+ struct efct_node *node;
+
+ struct list_head list_entry;
+ struct list_head io_pending_link;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ u32 init_task_tag;
+ u32 tgt_task_tag;
+ u32 hw_tag;
+ u32 tag;
+ struct efct_scsi_sgl *sgl;
+ u32 sgl_allocated;
+ u32 sgl_count;
+ struct efct_scsi_tgt_io tgt_io;
+ u32 exp_xfer_len;
+
+ void *hw_priv;
+
+ enum efct_io_type io_type;
+ struct efct_hw_io *hio;
+ size_t transferred;
+
+ bool auto_resp;
+ bool low_latency;
+ u8 wq_steering;
+ u8 wq_class;
+ u64 xfer_req;
+ efct_scsi_io_cb_t scsi_tgt_cb;
+ void *scsi_tgt_cb_arg;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+ efct_scsi_io_cb_t bls_cb;
+ void *bls_cb_arg;
+ enum efct_scsi_tmf_cmd tmf_cmd;
+ u16 abort_rx_id;
+
+ bool cmd_tgt;
+ bool send_abts;
+ bool cmd_ini;
+ bool seq_init;
+ union efct_hw_io_param_u iparam;
+ enum efct_hw_io_type hio_type;
+ u64 wire_len;
+ void *hw_cb;
+
+ struct efct_io *io_to_abort;
+
+ struct efc_dma rspbuf;
+ u32 timeout;
+ u8 cs_ctl;
+ u8 io_free;
+ u32 app_id;
+};
+
+struct efct_io_cb_arg {
+ int status;
+ int ext_status;
+ void *app;
+};
+
+struct efct_io_pool *
+efct_io_pool_create(struct efct *efct, u32 num_sgl);
+int
+efct_io_pool_free(struct efct_io_pool *io_pool);
+u32
+efct_io_pool_allocated(struct efct_io_pool *io_pool);
+
+struct efct_io *
+efct_io_pool_io_alloc(struct efct_io_pool *io_pool);
+void
+efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io);
+struct efct_io *
+efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
+ u16 ox_id, u16 rx_id);
+#endif /* __EFCT_IO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
new file mode 100644
index 000000000000..b7d69ff29c09
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -0,0 +1,1698 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "efct_driver.h"
+#include "efct_lio.h"
+
+/*
+ * lio_wq is used to call into the LIO backend during creation or deletion of
+ * sessions. This serializes session management, as we create a
+ * single-threaded work queue.
+ */
+static struct workqueue_struct *lio_wq;
+
+static int
+efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
+{
+ u8 a[8];
+
+ put_unaligned_be64(wwn, a);
+ return snprintf(str, len, "%s%8phC", pre, a);
+}
+
+static int
+efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
+{
+ int num;
+ u8 b[8];
+
+ if (npiv) {
+ num = sscanf(name,
+ "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ } else {
+ num = sscanf(name,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
+ &b[7]);
+ }
+
+ if (num != 8)
+ return -EINVAL;
+
+ *wwp = get_unaligned_be64(b);
+ return 0;
+}
+
+static int
+efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
+{
+ unsigned int cnt = size;
+ int rc;
+
+ *wwpn = *wwnn = 0;
+ if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
+ if (rc)
+ return rc;
+
+ rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static ssize_t
+efct_lio_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (!tpg->nport || !tpg->nport->efct) {
+ pr_err("%s: Unable to find EFCT device\n", __func__);
+ return -EINVAL;
+ }
+
+ efct = tpg->nport->efct;
+ efc = efct->efcport;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (op == 1) {
+ int ret;
+
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
+ if (ret) {
+ efct->tgt_efct.lio_nport = NULL;
+ efc_log_debug(efct, "cannot bring port online\n");
+ return ret;
+ }
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain && efc->domain->nport)
+ efct_scsi_tgt_del_nport(efc, efc->domain->nport);
+
+ tpg->enabled = false;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
+}
+
+static ssize_t
+efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+ struct efct_lio_vport *lio_vport = tpg->vport;
+ struct efct *efct;
+ struct efc *efc;
+ unsigned long op;
+
+ if (kstrtoul(page, 0, &op) < 0)
+ return -EINVAL;
+
+ if (!lio_vport) {
+ pr_err("Unable to find vport\n");
+ return -EINVAL;
+ }
+
+ efct = lio_vport->efct;
+ efc = efct->efcport;
+
+ if (op == 1) {
+ tpg->enabled = true;
+ efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
+
+ if (efc->domain) {
+ int ret;
+
+ ret = efc_nport_vport_new(efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn,
+ U32_MAX, false, true,
+ NULL, NULL);
+ if (ret != 0) {
+ efc_log_err(efct, "Failed to create Vport\n");
+ return ret;
+ }
+ return count;
+ }
+
+ if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
+ lio_vport->npiv_wwpn, U32_MAX,
+ false, true, NULL, NULL)))
+ return -ENOMEM;
+
+ } else if (op == 0) {
+ efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
+
+ tpg->enabled = false;
+ /* only physical nport should exist, free lio_nport
+ * allocated in efct_lio_make_nport
+ */
+ if (efc->domain) {
+ efc_nport_vport_del(efct->efcport, efc->domain,
+ lio_vport->npiv_wwpn,
+ lio_vport->npiv_wwnn);
+ return count;
+ }
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->nport->wwpn_str;
+}
+
+static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->vport->wwpn_str;
+}
+
+static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpgt;
+}
+
+static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_write_protect;
+}
+
+static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static int
+efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.prod_mode_write_protect;
+}
+
+static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
+ return target_put_sess_cmd(se_cmd);
+}
+
+static int
+efct_lio_abort_tgt_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
+ return 0;
+}
+
+static void
+efct_lio_aborted_task(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);
+
+ if (ocp->rsp_sent)
+ return;
+
+ /* command has been aborted, cleanup here */
+ ocp->aborting = true;
+ ocp->err = EFCT_SCSI_STATUS_ABORTED;
+ /* terminate the exchange */
+ efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
+}
+
+static void efct_lio_release_cmd(struct se_cmd *se_cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct *efct = io->efct;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
+ efct_scsi_io_complete(io);
+ atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
+}
+
+static void efct_lio_close_session(struct se_session *se_sess)
+{
+ struct efc_node *node = se_sess->fabric_sess_ptr;
+
+ pr_debug("se_sess=%p node=%p", se_sess, node);
+
+ if (!node) {
+ pr_debug("node is NULL");
+ return;
+ }
+
+ efc_node_post_shutdown(node, NULL);
+}
+
+static u32 efct_lio_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static int efct_lio_get_cmd_state(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+
+ if (!io)
+ return 0;
+
+ return io->tgt_io.state;
+}
+
+static int
+efct_lio_sg_map(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ ocp->seg_map_cnt = pci_map_sg(io->efct->pci, cmd->t_data_sg,
+ cmd->t_data_nents, cmd->data_direction);
+ if (ocp->seg_map_cnt == 0)
+ return -EFAULT;
+ return 0;
+}
+
+static void
+efct_lio_sg_unmap(struct efct_io *io)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &ocp->cmd;
+
+ if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
+ return;
+
+ pci_unmap_sg(io->efct->pci, cmd->t_data_sg,
+ ocp->seg_map_cnt, cmd->data_direction);
+ ocp->seg_map_cnt = 0;
+}
+
+static int
+efct_lio_status_done(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ if (ocp->seg_map_cnt)
+ efct_lio_sg_unmap(io);
+
+ efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
+ scsi_status, ocp->err, flags, ocp->ddir);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg);
+
+static int
+efct_lio_write_pending(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg;
+ u32 flags = 0, cnt, curcnt;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
+ efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
+ cmd->transport_state, cmd->se_cmd_flags);
+
+ if (ocp->seg_cnt == 0) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
+ return -EFAULT;
+ }
+ }
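+ /*
+ * Post at most sgl_allocated segments per data phase; the
+ * datamove completion re-enters this function for the rest.
+ */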
+ curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
+ curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
+ /* find current sg */
+ for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
+ sg = sg_next(sg))
+ ;/* do nothing */
+
+ for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ sgl[cnt].len = sg_dma_len(sg);
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ }
+
+ if (ocp->cur_seg == ocp->seg_cnt)
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+
+ return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
+
+static int
+efct_lio_queue_data_in(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ struct efct_scsi_sgl *sgl = io->sgl;
+ struct scatterlist *sg = NULL;
+ uint flags = 0, cnt = 0, curcnt = 0;
+ u64 length = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);
+
+ if (ocp->seg_cnt == 0) {
+ if (cmd->data_length) {
+ ocp->seg_cnt = cmd->t_data_nents;
+ ocp->cur_seg = 0;
+ if (efct_lio_sg_map(io)) {
+ efct_lio_io_printf(io,
+ "efct_lio_sg_map failed\n");
+ return -EAGAIN;
+ }
+ } else {
+ /* If command length is 0, send the response status */
+ struct efct_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ efct_lio_io_printf(io,
+ "cmd : %p length 0, send status\n",
+ cmd);
+ return efct_scsi_send_resp(io, 0, &rsp,
+ efct_lio_status_done, NULL);
+ }
+ }
+ curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);
+
+ while (cnt < curcnt) {
+ sg = &cmd->t_data_sg[ocp->cur_seg];
+ sgl[cnt].addr = sg_dma_address(sg);
+ sgl[cnt].dif_addr = 0;
+ if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
+ sgl[cnt].len = cmd->data_length - ocp->transferred_len;
+ else
+ sgl[cnt].len = sg_dma_len(sg);
+
+ ocp->transferred_len += sgl[cnt].len;
+ length += sgl[cnt].len;
+ ocp->cur_seg++;
+ cnt++;
+ if (ocp->transferred_len == cmd->data_length)
+ break;
+ }
+
+ if (ocp->transferred_len == cmd->data_length) {
+ flags = EFCT_SCSI_LAST_DATAPHASE;
+ ocp->seg_cnt = ocp->cur_seg;
+ }
+
+ /* If there is residual, disable Auto Good Response */
+ if (cmd->residual_count)
+ flags |= EFCT_SCSI_NO_AUTO_RESPONSE;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);
+
+ return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
+ efct_lio_datamove_done, NULL);
+}
+
+static void
+efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *cmd = &io->tgt_io.cmd;
+ int rc;
+
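+ /*
+ * If the completion indicates the response has already been
+ * sent, just free the command.
+ */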
+ if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
+ ocp->rsp_sent = true;
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return;
+ }
+
+ /* send check condition if an error occurred */
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+ /* Check for residual underrun or overrun */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc != 0) {
+ efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ } else {
+ ocp->rsp_sent = true;
+ }
+}
+
+static int
+efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ efct_lio_io_printf(io, "callback completed with error=%d\n",
+ scsi_status);
+ ocp->err = scsi_status;
+ }
+ efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);
+ if (ocp->seg_map_cnt) {
+ if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
+ ocp->cur_seg < ocp->seg_cnt) {
+ int rc;
+
+ efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
+ ocp->cur_seg);
+ if (ocp->ddir == DMA_TO_DEVICE)
+ rc = efct_lio_write_pending(&ocp->cmd);
+ else
+ rc = efct_lio_queue_data_in(&ocp->cmd);
+ if (!rc)
+ return 0;
+
+ ocp->err = EFCT_SCSI_STATUS_ERROR;
+ efct_lio_io_printf(io, "could not continue command\n");
+ }
+ efct_lio_sg_unmap(io);
+ }
+
+ if (io->tgt_io.aborting) {
+ efct_lio_io_printf(io, "IO done aborted\n");
+ return 0;
+ }
+
+ if (ocp->ddir == DMA_TO_DEVICE) {
+ efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
+ io->tgt_io.cmd.transport_state);
+ if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
+ transport_generic_request_failure(&io->tgt_io.cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
+ } else {
+ efct_set_lio_io_state(io,
+ EFCT_LIO_STATE_TGT_EXECUTE_CMD);
+ target_execute_cmd(&io->tgt_io.cmd);
+ }
+ } else {
+ efct_lio_send_resp(io, scsi_status, flags);
+ }
+ return 0;
+}
+
+static int
+efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
+ &io->tgt_io.cmd, scsi_status, flags);
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
+ transport_generic_free_cmd(&io->tgt_io.cmd, 0);
+ return 0;
+}
+
+static int
+efct_lio_null_tmf_done(struct efct_io *tmfio,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
+ &tmfio->tgt_io.cmd, scsi_status, flags);
+
+ /* free struct efct_io only, no active se_cmd */
+ efct_scsi_io_complete(tmfio);
+ return 0;
+}
+
+static int
+efct_lio_queue_status(struct se_cmd *cmd)
+{
+ struct efct_scsi_cmd_resp rsp;
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
+ int rc = 0;
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
+ efct_lio_io_printf(io,
+ "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
+ cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
+ cmd->scsi_sense_length);
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.scsi_status = cmd->scsi_status;
+ rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
+ rsp.sense_data_length = cmd->scsi_sense_length;
+
+ /* Check for residual underrun or overrun; mark a negative value for
+ * underrun so it can be recognized in HW
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
+ rsp.residual = -cmd->residual_count;
+ else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
+ rsp.residual = cmd->residual_count;
+
+ rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
+ if (rc == 0)
+ ocp->rsp_sent = true;
+ return rc;
+}
+
+static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
+{
+ struct efct_scsi_tgt_io *ocp =
+ container_of(cmd, struct efct_scsi_tgt_io, cmd);
+ struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
+ struct se_tmr_req *se_tmr = cmd->se_tmr_req;
+ u8 rspcode;
+
+ efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
+ cmd, se_tmr->function, se_tmr->response);
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ default:
+ rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
+ break;
+ }
+ efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
+}
+
+static struct efct *efct_find_wwpn(u64 wwpn)
+{
+ struct efct *efct;
+
+ /* Search for the HBA that has this WWPN */
+ list_for_each_entry(efct, &efct_devices, list_entry) {
+
+ if (wwpn == efct_get_wwpn(&efct->hw))
+ return efct;
+ }
+
+ return NULL;
+}
+
+static struct se_wwn *
+efct_lio_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_nport *lio_nport;
+ struct efct *efct;
+ int ret;
+ u64 wwpn;
+
+ ret = efct_lio_parse_wwn(name, &wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
+ if (!lio_nport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_nport->efct = efct;
+ lio_nport->wwpn = wwpn;
+ efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
+ "naa.", wwpn);
+ efct->tgt_efct.lio_nport = lio_nport;
+
+ return &lio_nport->nport_wwn;
+}
+
+static struct se_wwn *
+efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
+ struct config_group *group, const char *name)
+{
+ struct efct_lio_vport *lio_vport;
+ struct efct *efct;
+ int ret = -1;
+ u64 p_wwpn, npiv_wwpn, npiv_wwnn;
+ char *p, *pbuf, tmp[128];
+ struct efct_lio_vport_list_t *vport_list;
+ struct fc_vport *new_fc_vport;
+ struct fc_vport_identifiers vport_id;
+ unsigned long flags = 0;
+
+ snprintf(tmp, sizeof(tmp), "%s", name);
+ pbuf = &tmp[0];
+
+ p = strsep(&pbuf, "@");
+
+ if (!p || !pbuf) {
+ pr_err("Unable to find separator operator(@)\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
+ &npiv_wwnn);
+ if (ret)
+ return ERR_PTR(ret);
+
+ efct = efct_find_wwpn(p_wwpn);
+ if (!efct) {
+ pr_err("cannot find EFCT for base wwpn %s\n", name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
+ if (!lio_vport)
+ return ERR_PTR(-ENOMEM);
+
+ lio_vport->efct = efct;
+ lio_vport->wwpn = p_wwpn;
+ lio_vport->npiv_wwpn = npiv_wwpn;
+ lio_vport->npiv_wwnn = npiv_wwnn;
+
+ efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
+ "naa.", npiv_wwpn);
+
+ vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
+ if (!vport_list) {
+ kfree(lio_vport);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vport_list->lio_vport = lio_vport;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ INIT_LIST_HEAD(&vport_list->list_entry);
+ list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+
+ memset(&vport_id, 0, sizeof(vport_id));
+ vport_id.port_name = npiv_wwpn;
+ vport_id.node_name = npiv_wwnn;
+ vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+ vport_id.vport_type = FC_PORTTYPE_NPIV;
+ vport_id.disable = false;
+
+ new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
+ if (!new_fc_vport) {
+ efc_log_err(efct, "fc_vport_create failed\n");
+ kfree(lio_vport);
+ kfree(vport_list);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lio_vport->fc_vport = new_fc_vport;
+
+ return &lio_vport->vport_wwn;
+}
+
+static void
+efct_lio_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct *efct = lio_nport->efct;
+
+ /* only physical nport should exist, free lio_nport allocated
+ * in efct_lio_make_nport.
+ */
+ kfree(efct->tgt_efct.lio_nport);
+ efct->tgt_efct.lio_nport = NULL;
+}
+
+static void
+efct_lio_npiv_drop_nport(struct se_wwn *wwn)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_vport_list_t *vport, *next_vport;
+ struct efct *efct = lio_vport->efct;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
+ if (lio_vport->fc_vport)
+ fc_vport_terminate(lio_vport->fc_vport);
+
+ list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
+ list_entry) {
+ if (vport->lio_vport == lio_vport) {
+ list_del(&vport->list_entry);
+ kfree(vport->lio_vport);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+}
+
+static struct se_portal_group *
+efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_nport *lio_nport =
+ container_of(wwn, struct efct_lio_nport, nport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->nport = lio_nport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ efct = lio_nport->efct;
+ efct->tgt_efct.tpg = tpg;
+ efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);
+
+ xa_init(&efct->lookup);
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ struct efct *efct = tpg->nport->efct;
+
+ efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
+ tpg->nport->efct->tgt_efct.tpg = NULL;
+ core_tpg_deregister(se_tpg);
+ xa_destroy(&efct->lookup);
+ kfree(tpg);
+}
+
+static struct se_portal_group *
+efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
+{
+ struct efct_lio_vport *lio_vport =
+ container_of(wwn, struct efct_lio_vport, vport_wwn);
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ unsigned long n;
+ int ret;
+
+ efct = lio_vport->efct;
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (n != 1) {
+ efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
+ if (!tpg)
+ return ERR_PTR(-ENOMEM);
+
+ tpg->vport = lio_vport;
+ tpg->tpgt = n;
+ tpg->enabled = false;
+
+ tpg->tpg_attrib.generate_node_acls = 1;
+ tpg->tpg_attrib.demo_mode_write_protect = 1;
+ tpg->tpg_attrib.cache_dynamic_acls = 1;
+ tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.session_deletion_wait = 1;
+
+ ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
+
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ lio_vport->tpg = tpg;
+ efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);
+
+ return &tpg->tpg;
+}
+
+static void
+efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct efct_lio_tpg *tpg =
+ container_of(se_tpg, struct efct_lio_tpg, tpg);
+
+ efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
+ tpg->tpgt);
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static int
+efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
+{
+ struct efct_lio_nacl *nacl;
+ u64 wwnn;
+
+ if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
+ return -EINVAL;
+
+ nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+
+ efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
+ return 0;
+}
+
+static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static int
+efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
+{
+ struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
+
+ return tpg->tpg_attrib.demo_mode_login_only;
+}
+
+static struct efct_lio_tpg *
+efct_get_vport_tpg(struct efc_node *node)
+{
+ struct efct *efct;
+ u64 wwpn = node->nport->wwpn;
+ struct efct_lio_vport_list_t *vport, *next;
+ struct efct_lio_vport *lio_vport = NULL;
+ struct efct_lio_tpg *tpg = NULL;
+ unsigned long flags = 0;
+
+ efct = node->efc->base;
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+ list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
+ list_entry) {
+ lio_vport = vport->lio_vport;
+ if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
+ efc_log_debug(efct, "found tpg on vport\n");
+ tpg = lio_vport->tpg;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
+ return tpg;
+}
+
+static void
+_efct_tgt_node_free(struct kref *arg)
+{
+ struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
+ struct efc_node *node = tgt_node->node;
+
+ efc_scsi_del_initiator_complete(node->efc, node);
+ kfree(tgt_node);
+}
+
+static int efct_session_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *private)
+{
+ struct efc_node *node = private;
+ struct efct_node *tgt_node;
+ struct efct *efct = node->efc->base;
+
+ tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
+ if (!tgt_node)
+ return -ENOMEM;
+
+ kref_init(&tgt_node->ref);
+ tgt_node->release = _efct_tgt_node_free;
+
+ tgt_node->session = se_sess;
+ node->tgt_node = tgt_node;
+ tgt_node->efct = efct;
+
+ tgt_node->node = node;
+
+ tgt_node->node_fc_id = node->rnode.fc_id;
+ tgt_node->port_fc_id = node->nport->fc_id;
+ tgt_node->vpi = node->nport->indicator;
+ tgt_node->rpi = node->rnode.indicator;
+
+ spin_lock_init(&tgt_node->active_ios_lock);
+ INIT_LIST_HEAD(&tgt_node->active_ios);
+
+ return 0;
+}
+
+int efct_scsi_tgt_new_device(struct efct *efct)
+{
+ u32 total_ios;
+
+ /* Get the max settings */
+ efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
+ efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);
+
+ /* initialize IO watermark fields */
+ atomic_set(&efct->tgt_efct.ios_in_use, 0);
+ total_ios = efct->hw.config.n_io;
+ efc_log_debug(efct, "total_ios=%d\n", total_ios);
+ efct->tgt_efct.watermark_min =
+ (total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
+ efct->tgt_efct.watermark_max =
+ (total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
+ atomic_set(&efct->tgt_efct.io_high_watermark,
+ efct->tgt_efct.watermark_max);
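+ /*
+ * Worked example (assuming low/high percentages of 10 and 90): with
+ * total_ios = 1024 the watermark window is 102..921 outstanding IOs;
+ * the exact bounds depend on EFCT_WATERMARK_LOW_PCT/HIGH_PCT.
+ */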
+ atomic_set(&efct->tgt_efct.watermark_hit, 0);
+ atomic_set(&efct->tgt_efct.initiator_count, 0);
+
+ lio_wq = create_singlethread_workqueue("efct_lio_worker");
+ if (!lio_wq) {
+ efc_log_err(efct, "workqueue create failed\n");
+ return -EIO;
+ }
+
+ spin_lock_init(&efct->tgt_efct.efct_lio_lock);
+ INIT_LIST_HEAD(&efct->tgt_efct.vport_list);
+
+ return 0;
+}
+
+int efct_scsi_tgt_del_device(struct efct *efct)
+{
+ flush_workqueue(lio_wq);
+
+ return 0;
+}
+
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
+{
+ struct efct *efct = nport->efc->base;
+
+ efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
+ efct->tgt_efct.lio_nport->wwpn_str);
+
+ return 0;
+}
+
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
+{
+ efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
+}
+
+static void efct_lio_setup_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ char wwpn[WWN_NAME_LEN];
+ struct efct_lio_tpg *tpg;
+ struct efct_node *tgt_node;
+ struct se_portal_group *se_tpg;
+ struct se_session *se_sess;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+ /* Check to see if it belongs to a vport;
+ * if not, use the physical port
+ */
+ tpg = efct_get_vport_tpg(node);
+ if (tpg) {
+ se_tpg = &tpg->tpg;
+ } else if (efct->tgt_efct.tpg) {
+ tpg = efct->tgt_efct.tpg;
+ se_tpg = &tpg->tpg;
+ } else {
+ efc_log_err(efct, "failed to init session\n");
+ return;
+ }
+
+ /*
+ * Format the FCP Initiator port_name into colon
+ * separated values to match the format by our explicit
+ * ConfigFS NodeACLs.
+ */
+ efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));
+
+ se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
+ node, efct_session_cb);
+ if (IS_ERR(se_sess)) {
+ efc_log_err(efct, "failed to setup session\n");
+ kfree(wq_data);
+ efc_scsi_sess_reg_complete(node, -EIO);
+ return;
+ }
+
+ tgt_node = node->tgt_node;
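+ /* lookup key: local port FC ID in the upper 32 bits, remote node FC ID in the lower 32 */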
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+
+ efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
+ se_sess, node, id);
+
+ if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
+ efc_log_err(efct, "Node lookup store failed\n");
+
+ efc_scsi_sess_reg_complete(node, 0);
+
+ /* update IO watermark: increment initiator count */
+ ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
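+ /*
+ * i.e. each logged-in initiator lowers the task-set-full threshold by
+ * EFCT_IO_WATERMARK_PER_INITIATOR IOs, clamped at watermark_min.
+ */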
+
+ kfree(wq_data);
+}
+
+int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_lio_wq_data *wq_data;
+
+ /*
+ * Since LIO only supports initiator validation at thread level,
+ * we are open minded and accept all callers.
+ */
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_setup_session);
+ queue_work(lio_wq, &wq_data->work);
+ return EFC_SCSI_CALL_ASYNC;
+}
+
+static void efct_lio_remove_session(struct work_struct *work)
+{
+ struct efct_lio_wq_data *wq_data =
+ container_of(work, struct efct_lio_wq_data, work);
+ struct efct *efct = wq_data->efct;
+ struct efc_node *node = wq_data->ptr;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+
+ tgt_node = node->tgt_node;
+ if (!tgt_node) {
+ /* base driver has sent back-to-back requests
+ * to unreg session with no intervening
+ * register
+ */
+ efc_log_err(efct, "unreg session for NULL session\n");
+ efc_scsi_del_initiator_complete(node->efc, node);
+ return;
+ }
+
+ se_sess = tgt_node->session;
+ efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
+ se_sess, node);
+
+ /* first flag all session commands to complete */
+ target_stop_session(se_sess);
+
+ /* now wait for session commands to complete */
+ target_wait_for_sess_cmds(se_sess);
+ target_remove_session(se_sess);
+ tgt_node->session = NULL;
+ node->tgt_node = NULL;
+ kref_put(&tgt_node->ref, tgt_node->release);
+
+ kfree(wq_data);
+}
+
+int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
+{
+ struct efct *efct = node->efc->base;
+ struct efct_node *tgt_node = node->tgt_node;
+ struct efct_lio_wq_data *wq_data;
+ int watermark;
+ int ini_count;
+ u64 id;
+
+ if (reason == EFCT_SCSI_INITIATOR_MISSING)
+ return EFC_SCSI_CALL_COMPLETE;
+
+ if (!tgt_node) {
+ efc_log_err(efct, "tgt_node is NULL\n");
+ return -EIO;
+ }
+
+ wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
+ if (!wq_data)
+ return -ENOMEM;
+
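+ /* Remove the session from the lookup xarray before queuing teardown */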
+ id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
+ xa_erase(&efct->lookup, id);
+
+ wq_data->ptr = node;
+ wq_data->efct = efct;
+ INIT_WORK(&wq_data->work, efct_lio_remove_session);
+ queue_work(lio_wq, &wq_data->work);
+
+ /*
+ * update IO watermark: decrement initiator count
+ */
+ ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);
+
+ watermark = efct->tgt_efct.watermark_max -
+ ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
+ watermark = (efct->tgt_efct.watermark_min > watermark) ?
+ efct->tgt_efct.watermark_min : watermark;
+ atomic_set(&efct->tgt_efct.io_high_watermark, watermark);
+
+ return EFC_SCSI_CALL_ASYNC;
+}
+
+void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
+ u32 cdb_len, u32 flags)
+{
+ struct efct_scsi_tgt_io *ocp = &io->tgt_io;
+ struct se_cmd *se_cmd = &io->tgt_io.cmd;
+ struct efct *efct = io->efct;
+ char *ddir;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc = 0;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+
+ /* set target timeout */
+ io->timeout = efct->target_io_timer_sec;
+
+ if (flags & EFCT_SCSI_CMD_SIMPLE)
+ ocp->task_attr = TCM_SIMPLE_TAG;
+ else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
+ ocp->task_attr = TCM_HEAD_TAG;
+ else if (flags & EFCT_SCSI_CMD_ORDERED)
+ ocp->task_attr = TCM_ORDERED_TAG;
+ else if (flags & EFCT_SCSI_CMD_ACA)
+ ocp->task_attr = TCM_ACA_TAG;
+
+ switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
+ case EFCT_SCSI_CMD_DIR_IN:
+ ddir = "FROM_INITIATOR";
+ ocp->ddir = DMA_TO_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "TO_INITIATOR";
+ ocp->ddir = DMA_FROM_DEVICE;
+ break;
+ case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
+ ddir = "BIDIR";
+ ocp->ddir = DMA_BIDIRECTIONAL;
+ break;
+ default:
+ ddir = "NONE";
+ ocp->ddir = DMA_NONE;
+ break;
+ }
+
+ ocp->lun = lun;
+ efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
+ cdb[0], ddir, io->exp_xfer_len);
+
+ tgt_node = io->node;
+ se_sess = tgt_node->session;
+ if (!se_sess) {
+ efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
+ &ocp->cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
+ rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
+ ocp->lun, io->exp_xfer_len, ocp->task_attr,
+ ocp->ddir, TARGET_SCF_ACK_KREF);
+ if (rc) {
+ efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
+ efct_scsi_io_free(io);
+ return;
+ }
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
+ NULL, 0, GFP_ATOMIC))
+ return;
+
+ target_submit(se_cmd);
+}
+
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *io_to_abort, u32 flags)
+{
+ unsigned char tmr_func;
+ struct efct *efct = tmfio->efct;
+ struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
+ struct efct_node *tgt_node;
+ struct se_session *se_sess;
+ int rc;
+
+ memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
+ atomic_add_return(1, &efct->tgt_efct.ios_in_use);
+ efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
+ tmfio->display_name, cmd, lun);
+
+ switch (cmd) {
+ case EFCT_SCSI_TMF_ABORT_TASK:
+ tmr_func = TMR_ABORT_TASK;
+ break;
+ case EFCT_SCSI_TMF_ABORT_TASK_SET:
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_TASK_SET:
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+ case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
+ tmr_func = TMR_LUN_RESET;
+ break;
+ case EFCT_SCSI_TMF_CLEAR_ACA:
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+ case EFCT_SCSI_TMF_TARGET_RESET:
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+ case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
+ case EFCT_SCSI_TMF_QUERY_TASK_SET:
+ default:
+ goto tmf_fail;
+ }
+
+ tmfio->tgt_io.tmf = tmr_func;
+ tmfio->tgt_io.lun = lun;
+ tmfio->tgt_io.io_to_abort = io_to_abort;
+
+ tgt_node = tmfio->node;
+
+ se_sess = tgt_node->session;
+ if (!se_sess)
+ return 0;
+
+ rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
+ GFP_ATOMIC, tmfio->init_task_tag, TARGET_SCF_ACK_KREF);
+
+ efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
+ if (rc)
+ goto tmf_fail;
+
+ return 0;
+
+tmf_fail:
+ efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_lio_null_tmf_done, NULL);
+ return 0;
+}
+
+/* Start items for efct_lio_tpg_attrib_cit */
+
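+/*
+ * For each attribute name, this macro emits a configfs show/store pair:
+ * show prints the current value, store accepts only 0 or 1. CONFIGFS_ATTR()
+ * then declares the matching efct_lio_tpg_attrib_attr_<name> entry used in
+ * the attrs[] table below.
+ */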
+#define DEF_EFCT_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)
+
+DEF_EFCT_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
+ &efct_lio_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
+
+#define DEF_EFCT_NPIV_TPG_ATTRIB(name) \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_show( \
+ struct config_item *item, char *page) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ \
+ return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+} \
+ \
+static ssize_t efct_lio_npiv_tpg_attrib_##name##_store( \
+ struct config_item *item, const char *page, size_t count) \
+{ \
+ struct se_portal_group *se_tpg = to_tpg(item); \
+ struct efct_lio_tpg *tpg = container_of(se_tpg, \
+ struct efct_lio_tpg, tpg); \
+ struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib; \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with ret: %d\n", ret); \
+ return ret; \
+ } \
+ \
+ if (val != 0 && val != 1) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->name = val; \
+ \
+ return count; \
+} \
+CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)
+
+DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
+DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
+DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);
+
+static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
+ &efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
+ &efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
+ &efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
+ &efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
+ NULL,
+};
+
+CONFIGFS_ATTR(efct_lio_tpg_, enable);
+static struct configfs_attribute *efct_lio_tpg_attrs[] = {
+ &efct_lio_tpg_attr_enable, NULL };
+CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);
+static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
+ &efct_lio_npiv_tpg_attr_enable, NULL };
+
+static const struct target_core_fabric_ops efct_lio_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_make_nport,
+ .fabric_drop_wwn = efct_lio_drop_nport,
+ .fabric_make_tpg = efct_lio_make_tpg,
+ .fabric_drop_tpg = efct_lio_drop_tpg,
+ .tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_tpg_attrib_attrs,
+};
+
+static const struct target_core_fabric_ops efct_lio_npiv_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "efct_npiv",
+ .node_acl_size = sizeof(struct efct_lio_nacl),
+ .max_data_sg_nents = 65535,
+ .tpg_get_wwn = efct_lio_get_npiv_fabric_wwn,
+ .tpg_get_tag = efct_lio_get_npiv_tag,
+ .fabric_init_nodeacl = efct_lio_init_nodeacl,
+ .tpg_check_demo_mode = efct_lio_check_demo_mode,
+ .tpg_check_demo_mode_cache = efct_lio_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ efct_lio_npiv_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ efct_lio_npiv_check_prod_write_protect,
+ .tpg_get_inst_index = efct_lio_tpg_get_inst_index,
+ .check_stop_free = efct_lio_check_stop_free,
+ .aborted_task = efct_lio_aborted_task,
+ .release_cmd = efct_lio_release_cmd,
+ .close_session = efct_lio_close_session,
+ .sess_get_index = efct_lio_sess_get_index,
+ .write_pending = efct_lio_write_pending,
+ .set_default_node_attributes = efct_lio_set_default_node_attrs,
+ .get_cmd_state = efct_lio_get_cmd_state,
+ .queue_data_in = efct_lio_queue_data_in,
+ .queue_status = efct_lio_queue_status,
+ .queue_tm_rsp = efct_lio_queue_tm_rsp,
+ .fabric_make_wwn = efct_lio_npiv_make_nport,
+ .fabric_drop_wwn = efct_lio_npiv_drop_nport,
+ .fabric_make_tpg = efct_lio_npiv_make_tpg,
+ .fabric_drop_tpg = efct_lio_npiv_drop_tpg,
+ .tpg_check_demo_mode_login_only =
+ efct_lio_npiv_check_demo_mode_login_only,
+ .tpg_check_prot_fabric_only = NULL,
+ .sess_get_initiator_sid = NULL,
+ .tfc_tpg_base_attrs = efct_lio_npiv_tpg_attrs,
+ .tfc_tpg_attrib_attrs = efct_lio_npiv_tpg_attrib_attrs,
+};
+
+int efct_scsi_tgt_driver_init(void)
+{
+ int rc;
+
+ /* Register the top level struct config_item_type with TCM core */
+ rc = target_register_template(&efct_lio_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ return rc;
+ }
+ rc = target_register_template(&efct_lio_npiv_ops);
+ if (rc < 0) {
+ pr_err("target_fabric_configfs_register failed with %d\n", rc);
+ target_unregister_template(&efct_lio_ops);
+ return rc;
+ }
+ return 0;
+}
+
+int efct_scsi_tgt_driver_exit(void)
+{
+ target_unregister_template(&efct_lio_ops);
+ target_unregister_template(&efct_lio_npiv_ops);
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_lio.h b/drivers/scsi/elx/efct/efct_lio.h
new file mode 100644
index 000000000000..569a0d4b1894
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_lio.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCT_LIO_H__
+#define __EFCT_LIO_H__
+
+#include "efct_scsi.h"
+#include <target/target_core_base.h>
+
+#define efct_lio_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ ##__VA_ARGS__)
+
+#define efct_lio_tmfio_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, \
+ "[%s] [%04x][i:%04x t:%04x h:%04x][f:%02x]" fmt,\
+ io->node->display_name, io->instance_index, \
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag,\
+ io->tgt_io.tmf, ##__VA_ARGS__)
+
+#define efct_set_lio_io_state(io, value) (io->tgt_io.state |= value)
+
+struct efct_lio_wq_data {
+ struct efct *efct;
+ void *ptr;
+ struct work_struct work;
+};
+
+/* Target private efct structure */
+struct efct_scsi_tgt {
+ u32 max_sge;
+ u32 max_sgl;
+
+ /*
+ * Variables used to send task set full. We are using a high watermark
+ * method to send task set full. We will reserve a fixed number of IOs
+ * per initiator plus a fudge factor. Once we reach this number,
+ * then the target will start sending task set full/busy responses.
+ */
+ atomic_t initiator_count;
+ atomic_t ios_in_use;
+ atomic_t io_high_watermark;
+
+ atomic_t watermark_hit;
+ int watermark_min;
+ int watermark_max;
+
+ struct efct_lio_nport *lio_nport;
+ struct efct_lio_tpg *tpg;
+
+ struct list_head vport_list;
+ /* Protects vport list*/
+ spinlock_t efct_lio_lock;
+
+ u64 wwnn;
+};
+
+struct efct_scsi_tgt_nport {
+ struct efct_lio_nport *lio_nport;
+};
+
+struct efct_node {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efct *efct;
+ struct efc_node *node;
+ struct se_session *session;
+ spinlock_t active_ios_lock;
+ struct list_head active_ios;
+ char display_name[EFC_NAME_LENGTH];
+ u32 port_fc_id;
+ u32 node_fc_id;
+ u32 vpi;
+ u32 rpi;
+ u32 abort_cnt;
+};
+
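+/*
+ * Per-IO state flags: efct_set_lio_io_state() ORs these into
+ * io->tgt_io.state as the command moves through the LIO and SCSI
+ * layers, which helps when tracing a command's life cycle.
+ */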
+#define EFCT_LIO_STATE_SCSI_RECV_CMD (1 << 0)
+#define EFCT_LIO_STATE_TGT_SUBMIT_CMD (1 << 1)
+#define EFCT_LIO_STATE_TFO_QUEUE_DATA_IN (1 << 2)
+#define EFCT_LIO_STATE_TFO_WRITE_PENDING (1 << 3)
+#define EFCT_LIO_STATE_TGT_EXECUTE_CMD (1 << 4)
+#define EFCT_LIO_STATE_SCSI_SEND_RD_DATA (1 << 5)
+#define EFCT_LIO_STATE_TFO_CHK_STOP_FREE (1 << 6)
+#define EFCT_LIO_STATE_SCSI_DATA_DONE (1 << 7)
+#define EFCT_LIO_STATE_TFO_QUEUE_STATUS (1 << 8)
+#define EFCT_LIO_STATE_SCSI_SEND_RSP (1 << 9)
+#define EFCT_LIO_STATE_SCSI_RSP_DONE (1 << 10)
+#define EFCT_LIO_STATE_TGT_GENERIC_FREE (1 << 11)
+#define EFCT_LIO_STATE_SCSI_RECV_TMF (1 << 12)
+#define EFCT_LIO_STATE_TGT_SUBMIT_TMR (1 << 13)
+#define EFCT_LIO_STATE_TFO_WRITE_PEND_STATUS (1 << 14)
+#define EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE (1 << 15)
+
+#define EFCT_LIO_STATE_TFO_ABORTED_TASK (1 << 29)
+#define EFCT_LIO_STATE_TFO_RELEASE_CMD (1 << 30)
+#define EFCT_LIO_STATE_SCSI_CMPL_CMD (1u << 31)
+
+struct efct_scsi_tgt_io {
+ struct se_cmd cmd;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+ enum dma_data_direction ddir;
+ int task_attr;
+ u64 lun;
+
+ u32 state;
+ u8 tmf;
+ struct efct_io *io_to_abort;
+ u32 seg_map_cnt;
+ u32 seg_cnt;
+ u32 cur_seg;
+ enum efct_scsi_io_status err;
+ bool aborting;
+ bool rsp_sent;
+ u32 transferred_len;
+};
+
+/* Handler return codes */
+enum {
+ SCSI_HANDLER_DATAPHASE_STARTED = 1,
+ SCSI_HANDLER_RESP_STARTED,
+ SCSI_HANDLER_VALIDATED_DATAPHASE_STARTED,
+ SCSI_CMD_NOT_SUPPORTED,
+};
+
+#define WWN_NAME_LEN 32
+struct efct_lio_vport {
+ u64 wwpn;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn vport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ atomic_t enable;
+};
+
+struct efct_lio_nport {
+ u64 wwpn;
+ unsigned char wwpn_str[WWN_NAME_LEN];
+ struct se_wwn nport_wwn;
+ struct efct_lio_tpg *tpg;
+ struct efct *efct;
+ atomic_t enable;
+};
+
+struct efct_lio_tpg_attrib {
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ u32 demo_mode_login_only;
+ bool session_deletion_wait;
+};
+
+struct efct_lio_tpg {
+ struct se_portal_group tpg;
+ struct efct_lio_nport *nport;
+ struct efct_lio_vport *vport;
+ struct efct_lio_tpg_attrib tpg_attrib;
+ unsigned short tpgt;
+ bool enabled;
+};
+
+struct efct_lio_nacl {
+ u64 nport_wwnn;
+ char nport_name[WWN_NAME_LEN];
+ struct se_session *session;
+ struct se_node_acl se_node_acl;
+};
+
+struct efct_lio_vport_list_t {
+ struct list_head list_entry;
+ struct efct_lio_vport *lio_vport;
+};
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+
+#endif /*__EFCT_LIO_H__ */
diff --git a/drivers/scsi/elx/efct/efct_scsi.c b/drivers/scsi/elx/efct/efct_scsi.c
new file mode 100644
index 000000000000..40fb3a724c76
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.c
@@ -0,0 +1,1159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_hw.h"
+
+#define enable_tsend_auto_resp(efct) 1
+#define enable_treceive_auto_resp(efct) 0
+
+#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
+ io->node->display_name, io->instance_index,\
+ io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+#define EFCT_LOG_ENABLE_SCSI_TRACE(efct) \
+ (((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)
+
+#define scsi_io_trace(io, fmt, ...) \
+ do { \
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
+ scsi_io_printf(io, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ struct efct_io *io;
+ unsigned long flags = 0;
+
+ efct = node->efct;
+
+ xport = efct->xport;
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+
+ io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!io) {
+ efc_log_err(efct, "IO alloc Failed\n");
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&io->ref);
+ io->release = _efct_scsi_io_free;
+
+ /* set generic fields */
+ io->efct = efct;
+ io->node = node;
+ kref_get(&node->ref);
+
+ /* set type and name */
+ io->io_type = EFCT_IO_TYPE_IO;
+ io->display_name = "scsi_io";
+
+ io->cmd_ini = false;
+ io->cmd_tgt = true;
+
+ /* Add to node's active_ios list */
+ INIT_LIST_HEAD(&io->list_entry);
+ list_add(&io->list_entry, &node->active_ios);
+
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ return io;
+}
+
+void
+_efct_scsi_io_free(struct kref *arg)
+{
+ struct efct_io *io = container_of(arg, struct efct_io, ref);
+ struct efct *efct = io->efct;
+ struct efct_node *node = io->node;
+ unsigned long flags = 0;
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+
+ if (io->io_free) {
+ efc_log_err(efct, "IO already freed.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ list_del_init(&io->list_entry);
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
+
+ kref_put(&node->ref, node->release);
+ io->node = NULL;
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+}
+
+void
+efct_scsi_io_free(struct efct_io *io)
+{
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ WARN_ON(!refcount_read(&io->ref.refcount));
+ kref_put(&io->ref, io->release);
+}
+
+static void
+efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ u32 flags = 0;
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ efct_scsi_io_cb_t cb;
+
+ if (!io || !io->efct) {
+ pr_err("%s: IO can not be NULL\n", __func__);
+ return;
+ }
+
+ scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
+
+ efct = io->efct;
+
+ io->transferred += length;
+
+ if (!io->scsi_tgt_cb) {
+ efct_scsi_check_pending(efct);
+ return;
+ }
+
+ /* Call target server completion */
+ cb = io->scsi_tgt_cb;
+
+ /* Clear the callback before invoking the callback */
+ io->scsi_tgt_cb = NULL;
+
+ /* if status was good, and auto-good-response was set,
+ * then callback target-server with IO_CMPL_RSP_SENT,
+ * otherwise send IO_CMPL
+ */
+ if (status == 0 && io->auto_resp)
+ flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
+ else
+ flags |= EFCT_SCSI_IO_CMPL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_stat = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ if (ext_status & SLI4_FC_DI_ERROR_GE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
+ else if (ext_status & SLI4_FC_DI_ERROR_AE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
+ else if (ext_status & SLI4_FC_DI_ERROR_RE)
+ scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
+ else
+ scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
+ case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
+ scsi_stat = EFCT_SCSI_STATUS_ABORTED;
+ break;
+ case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
+ scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
+ break;
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_stat = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ default:
+ /* we have seen 0x0d (TX_DMA_FAILED err) */
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
+ /* target IO timed out */
+ scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
+ break;
+
+ case SLI4_FC_WCQE_STATUS_SHUTDOWN:
+ /* Target IO cancelled by HW */
+ scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
+ break;
+
+ default:
+ scsi_stat = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+
+ cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);
+
+ efct_scsi_check_pending(efct);
+}
+
+static int
+efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
+ struct efct_scsi_sgl *sgl, u32 sgl_count,
+ enum efct_hw_io_type type)
+{
+ int rc;
+ u32 i;
+ struct efct *efct = hw->os;
+
+ /* Initialize HW SGL */
+ rc = efct_hw_io_init_sges(hw, hio, type);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
+ return -EIO;
+ }
+
+ for (i = 0; i < sgl_count; i++) {
+ /* Add data SGE */
+ rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
+ if (rc) {
+ efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
+ sgl_count, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void efc_log_sgl(struct efct_io *io)
+{
+ struct efct_hw_io *hio = io->hio;
+ struct sli4_sge *data = NULL;
+ u32 *dword = NULL;
+ u32 i;
+ u32 n_sge;
+
+ scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
+ upper_32_bits(hio->def_sgl.phys),
+ lower_32_bits(hio->def_sgl.phys));
+ n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
+ for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
+ dword = (u32 *)data;
+
+ scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, dword[0], dword[1], dword[2], dword[3]);
+
+ if (dword[2] & (1U << 31))
+ break;
+ }
+}
+
+static void
+efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
+ u8 *mqe, void *arg)
+{
+ struct efct_io *io = arg;
+
+ if (io) {
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (!io->hw_cb)
+ return;
+
+ io->hw_cb = NULL;
+ (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
+ }
+}
+
+static int
+efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
+{
+ int rc = 0;
+ struct efct *efct = io->efct;
+
+ /* Got a HW IO;
+ * update ini/tgt_task_tag with HW IO info and dispatch
+ */
+ io->hio = hio;
+ if (io->cmd_tgt)
+ io->tgt_task_tag = hio->indicator;
+ else if (io->cmd_ini)
+ io->init_task_tag = hio->indicator;
+ io->hw_tag = hio->reqtag;
+
+ hio->eq = io->hw_priv;
+
+ /* Copy WQ steering */
+ switch (io->wq_steering) {
+ case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
+ break;
+ case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
+ break;
+ case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
+ hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
+ break;
+ }
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_IO:
+ rc = efct_scsi_build_sgls(&efct->hw, io->hio,
+ io->sgl, io->sgl_count, io->hio_type);
+ if (rc)
+ break;
+
+ if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
+ efc_log_sgl(io);
+
+ if (io->app_id)
+ io->iparam.fcp_tgt.app_id = io->app_id;
+
+ io->iparam.fcp_tgt.vpi = io->node->vpi;
+ io->iparam.fcp_tgt.rpi = io->node->rpi;
+ io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
+ io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
+ io->iparam.fcp_tgt.xmit_len = io->wire_len;
+
+ rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
+ &io->iparam, io->hw_cb, io);
+ break;
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
+static int
+efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
+{
+ int rc;
+
+ switch (io->io_type) {
+ case EFCT_IO_TYPE_ABORT: {
+ struct efct_hw_io *hio_to_abort = NULL;
+
+ hio_to_abort = io->io_to_abort->hio;
+
+ if (!hio_to_abort) {
+ /*
+ * If "IO to abort" does not have an
+ * associated HW IO, immediately make callback with
+ * success. The command must have been sent to
+ * the backend, but the data phase has not yet
+ * started, so we don't have a HW IO.
+ *
+ * Note: since the backend shims should be
+ * taking a reference on io_to_abort, it should not
+ * be possible to have been completed and freed by
+ * the backend before the abort got here.
+ */
+ scsi_io_printf(io, "IO: not active\n");
+ ((efct_hw_done_t)io->hw_cb)(io->hio, 0,
+ SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
+ rc = 0;
+ break;
+ }
+
+ /* HW IO is valid, abort it */
+ scsi_io_printf(io, "aborting\n");
+ rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
+ io->send_abts, io->hw_cb, io);
+ if (rc) {
+ int status = SLI4_FC_WCQE_STATUS_SUCCESS;
+ efct_hw_done_t cb = io->hw_cb;
+
+ if (rc != -ENOENT && rc != -EINPROGRESS) {
+ status = -1;
+ scsi_io_printf(io, "Failed to abort IO rc=%d\n",
+ rc);
+ }
+ cb(io->hio, 0, status, 0, io);
+ rc = 0;
+ }
+
+ break;
+ }
+ default:
+ scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
+ rc = -EIO;
+ break;
+ }
+ return rc;
+}
+
+static struct efct_io *
+efct_scsi_dispatch_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ struct efct_hw_io *hio;
+ unsigned long flags = 0;
+ int status;
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ if (!list_empty(&xport->io_pending_list)) {
+ io = list_first_entry(&xport->io_pending_list, struct efct_io,
+ io_pending_link);
+ list_del_init(&io->io_pending_link);
+ }
+
+ if (!io) {
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ return NULL;
+ }
+
+ if (io->io_type == EFCT_IO_TYPE_ABORT) {
+ hio = NULL;
+ } else {
+ hio = efct_hw_io_alloc(&efct->hw);
+ if (!hio) {
+ /*
+ * No HW IO available. Put the IO back on
+ * the front of the pending list
+ */
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ io = NULL;
+ } else {
+ hio->eq = io->hw_priv;
+ }
+ }
+
+ /* Must drop the lock before dispatching the IO */
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (!io)
+ return NULL;
+
+ /*
+ * We pulled an IO off the pending list,
+ * and either got an HW IO or don't need one
+ */
+ atomic_sub_return(1, &xport->io_pending_count);
+ if (!hio)
+ status = efct_scsi_io_dispatch_no_hw_io(io);
+ else
+ status = efct_scsi_io_dispatch_hw_io(io, hio);
+ if (status) {
+ /*
+ * Invoke the HW callback, but do so in a
+ * separate execution context, provided by the
+ * NOP mailbox completion processing context,
+ * by using efct_hw_async_call()
+ */
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "call hw async failed\n");
+ }
+ }
+
+ return io;
+}
+
+void
+efct_scsi_check_pending(struct efct *efct)
+{
+ struct efct_xport *xport = efct->xport;
+ struct efct_io *io = NULL;
+ int count = 0;
+ unsigned long flags = 0;
+ int dispatch = 0;
+
+ /* Guard against recursion */
+ if (atomic_add_return(1, &xport->io_pending_recursing)) {
+ /* This function is already running. Decrement and return. */
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
+ while (efct_scsi_dispatch_pending(efct))
+ count++;
+
+ if (count) {
+ atomic_sub_return(1, &xport->io_pending_recursing);
+ return;
+ }
+
+ /*
+ * If nothing was removed from the list,
+ * we might be in a case where we need to abort an
+ * active IO and the abort is on the pending list.
+ * Look for an abort we can dispatch.
+ */
+
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+
+ list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
+ if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
+ /* This IO has a HW IO, so it is
+ * active. Dispatch the abort.
+ */
+ dispatch = 1;
+ list_del_init(&io->io_pending_link);
+ atomic_sub_return(1, &xport->io_pending_count);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ if (dispatch) {
+ if (efct_scsi_io_dispatch_no_hw_io(io)) {
+ if (efct_hw_async_call(&efct->hw,
+ efct_scsi_check_pending_async_cb, io)) {
+ efc_log_debug(efct, "hw async failed\n");
+ }
+ }
+ }
+
+ atomic_sub_return(1, &xport->io_pending_recursing);
+}
+
+int
+efct_scsi_io_dispatch(struct efct_io *io, void *cb)
+{
+ struct efct_hw_io *hio;
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * If this IO already has a HW IO, then this is not the
+ * first phase of the IO. Send it to the HW.
+ */
+ if (io->hio)
+ return efct_scsi_io_dispatch_hw_io(io, io->hio);
+
+ /*
+ * We don't already have a HW IO associated with the IO. First check
+ * the pending list. If not empty, add IO to the tail and process the
+ * pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ /*
+ * If this is a low latency request,
+ * put it at the front of the IO pending
+ * queue, otherwise put it at the end of the queue.
+ */
+ if (io->low_latency) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add(&io->io_pending_link, &xport->io_pending_list);
+ } else {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link,
+ &xport->io_pending_list);
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /*
+ * We don't have a HW IO associated with the IO and there's nothing
+ * on the pending list. Attempt to allocate a HW IO and dispatch it.
+ */
+ hio = efct_hw_io_alloc(&io->efct->hw);
+ if (!hio) {
+ /* Couldn't get a HW IO. Save this IO on the pending list */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ atomic_add_return(1, &xport->io_total_pending);
+ atomic_add_return(1, &xport->io_pending_count);
+ return 0;
+ }
+
+ /* We successfully allocated a HW IO; dispatch to HW */
+ return efct_scsi_io_dispatch_hw_io(io, hio);
+}
+
+int
+efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
+{
+ struct efct *efct = io->efct;
+ struct efct_xport *xport = efct->xport;
+ unsigned long flags = 0;
+
+ io->hw_cb = cb;
+
+ /*
+ * For aborts, we don't need a HW IO, but we still want
+ * to pass through the pending list to preserve ordering.
+ * Thus, if the pending list is not empty, add this abort
+ * to the pending list and process the pending list.
+ */
+ spin_lock_irqsave(&xport->io_pending_lock, flags);
+ if (!list_empty(&xport->io_pending_list)) {
+ INIT_LIST_HEAD(&io->io_pending_link);
+ list_add_tail(&io->io_pending_link, &xport->io_pending_list);
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+ atomic_add_return(1, &xport->io_pending_count);
+ atomic_add_return(1, &xport->io_total_pending);
+
+ /* process pending list */
+ efct_scsi_check_pending(efct);
+ return 0;
+ }
+ spin_unlock_irqrestore(&xport->io_pending_lock, flags);
+
+ /* nothing on pending list, dispatch abort */
+ return efct_scsi_io_dispatch_no_hw_io(io);
+}
+
+static inline int
+efct_scsi_xfer_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
+ enum efct_hw_io_type type, int enable_ar,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ size_t residual = 0;
+
+ io->sgl_count = sgl_count;
+
+ efct = io->efct;
+
+ scsi_io_trace(io, "%s wire_len %llu\n",
+ (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
+ xwire_len);
+
+ io->hio_type = type;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ residual = io->exp_xfer_len - io->transferred;
+ io->wire_len = (xwire_len < residual) ? xwire_len : residual;
+ residual = (xwire_len - io->wire_len);
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = io->transferred;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* if this is the last data phase and there is no residual, enable
+ * auto-good-response
+ */
+ if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
+ ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
+ (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+ io->auto_resp = true;
+ } else {
+ io->auto_resp = false;
+ }
+
+ /* save this transfer length */
+ io->xfer_req = io->wire_len;
+
+ /* Adjust the transferred count to account for overrun
+ * when the residual is calculated in efct_scsi_send_resp
+ */
+ io->transferred += residual;
+
+ /* Adjust the SGL size if there is overrun */
+
+ if (residual) {
+ struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];
+
+ while (residual) {
+ size_t len = sgl_ptr->len;
+
+ if (len > residual) {
+ sgl_ptr->len = len - residual;
+ residual = 0;
+ } else {
+ sgl_ptr->len = 0;
+ residual -= len;
+ io->sgl_count--;
+ }
+ sgl_ptr--;
+ }
+ }
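+ /*
+ * e.g. if the backend supplied more data than the initiator asked
+ * for, the excess (overrun) is trimmed from the tail SGL entries so
+ * only the expected transfer length goes on the wire.
+ */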
+
+ /* Set latency and WQ steering */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
+ if (efct->xport) {
+ struct efct_xport *xport = efct->xport;
+
+ if (type == EFCT_HW_IO_TARGET_READ) {
+ xport->fcp_stats.input_requests++;
+ xport->fcp_stats.input_bytes += xwire_len;
+ } else if (type == EFCT_HW_IO_TARGET_WRITE) {
+ xport->fcp_stats.output_requests++;
+ xport->fcp_stats.output_bytes += xwire_len;
+ }
+ }
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
+
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
+ len, EFCT_HW_IO_TARGET_READ,
+ enable_tsend_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
+ struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
+ EFCT_HW_IO_TARGET_WRITE,
+ enable_treceive_auto_resp(io->efct), cb, arg);
+}
+
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ int residual;
+ /* Always try auto resp */
+ bool auto_resp = true;
+ u8 scsi_status = 0;
+ u16 scsi_status_qualifier = 0;
+ u8 *sense_data = NULL;
+ u32 sense_data_length = 0;
+
+ efct = io->efct;
+
+ if (rsp) {
+ scsi_status = rsp->scsi_status;
+ scsi_status_qualifier = rsp->scsi_status_qualifier;
+ sense_data = rsp->sense_data;
+ sense_data_length = rsp->sense_data_length;
+ residual = rsp->residual;
+ } else {
+ residual = io->exp_xfer_len - io->transferred;
+ }
+
+ io->wire_len = 0;
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ /* Set low latency queueing request */
+ io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
+ io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
+ EFCT_SCSI_WQ_STEERING_SHIFT;
+ io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
+ EFCT_SCSI_WQ_CLASS_SHIFT;
+
+ if (scsi_status != 0 || residual || sense_data_length) {
+ struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
+ u8 *sns_data;
+
+ if (!fcprsp) {
+ efc_log_err(efct, "NULL response buffer\n");
+ return -EIO;
+ }
+
+ sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);
+
+ auto_resp = false;
+
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ io->wire_len += sizeof(*fcprsp);
+
+ fcprsp->resp.fr_status = scsi_status;
+ fcprsp->resp.fr_retry_delay =
+ cpu_to_be16(scsi_status_qualifier);
+
+ /* set residual status if necessary */
+ if (residual != 0) {
+ /* FCP: if data transferred is less than the
+ * amount expected, then this is an underflow.
+ * If data transferred would have been greater
+ * than the amount expected, this is an overflow
+ */
+ if (residual > 0) {
+ fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
+ fcprsp->ext.fr_resid = cpu_to_be32(residual);
+ } else {
+ fcprsp->resp.fr_flags |= FCP_RESID_OVER;
+ fcprsp->ext.fr_resid = cpu_to_be32(-residual);
+ }
+ }
+
+ if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
+ if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
+ efc_log_err(efct, "Sense exceeds max size.\n");
+ return -EIO;
+ }
+
+ fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
+ memcpy(sns_data, sense_data, sense_data_length);
+ fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
+ io->wire_len += sense_data_length;
+ }
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+ }
+
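+ /*
+ * When no status, sense data or residual needs to be reported, the
+ * good response can be generated automatically by the hardware.
+ */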
+ if (auto_resp)
+ io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
+
+ return efct_scsi_io_dispatch(io, efct_target_io_cb);
+}
+
+static int
+efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status bls_status;
+
+ efct = io->efct;
+
+ /* BLS isn't really a "SCSI" concept, but use SCSI status */
+ if (status) {
+ io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
+ bls_status = EFCT_SCSI_STATUS_ERROR;
+ } else {
+ bls_status = EFCT_SCSI_STATUS_GOOD;
+ }
+
+ if (io->bls_cb) {
+ efct_scsi_io_cb_t bls_cb = io->bls_cb;
+ void *bls_cb_arg = io->bls_cb_arg;
+
+ io->bls_cb = NULL;
+ io->bls_cb_arg = NULL;
+
+ /* invoke callback */
+ bls_cb(io, bls_status, 0, bls_cb_arg);
+ }
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+static int
+efct_target_send_bls_resp(struct efct_io *io,
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_acc *acc;
+ int rc;
+
+ /* fill out IO structure with everything needed to send BA_ACC */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = io->init_task_tag;
+ bls->rx_id = io->abort_rx_id;
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->ba_ox_id = cpu_to_be16(bls->ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls->rx_id);
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ /* generic io fields have already been populated */
+
+ /* set type and BLS-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "bls_rsp";
+ io->hio_type = EFCT_HW_BLS_ACC;
+ io->bls_cb = cb;
+ io->bls_cb_arg = arg;
+
+ /* dispatch IO */
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
+ efct_target_bls_resp_cb, io);
+ return rc;
+}
+
+static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+
+ efct_scsi_io_free(io);
+ return 0;
+}
+
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct sli_bls_params *bls = &io->iparam.bls;
+ struct efct *efct = node->efct;
+ struct fc_ba_rjt *acc;
+ int rc;
+
+ /* fill out BLS Response-specific fields */
+ io->io_type = EFCT_IO_TYPE_BLS_RESP;
+ io->display_name = "ba_rjt";
+ io->hio_type = EFCT_HW_BLS_RJT;
+ io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);
+
+ /* fill out iparam fields */
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls->vpi = io->node->vpi;
+ bls->rpi = io->node->rpi;
+ bls->s_id = U32_MAX;
+ bls->d_id = io->node->node_fc_id;
+ bls->rpi_registered = true;
+
+ acc = (void *)bls->payload;
+ acc->br_reason = ELS_RJT_UNAB;
+ acc->br_explan = ELS_EXPL_NONE;
+
+ rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
+ io);
+ if (rc) {
+ efc_log_err(efct, "efct_scsi_io_dispatch() failed: %d\n", rc);
+ efct_scsi_io_free(io);
+ io = NULL;
+ }
+ return io;
+}
+
+int
+efct_scsi_send_tmf_resp(struct efct_io *io,
+ enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3],
+ efct_scsi_io_cb_t cb, void *arg)
+{
+ int rc;
+ struct {
+ struct fcp_resp_with_ext rsp_ext;
+ struct fcp_resp_rsp_info info;
+ } *fcprsp;
+ u8 fcp_rspcode;
+
+ io->wire_len = 0;
+
+ switch (rspcode) {
+ case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
+ case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
+ fcp_rspcode = FCP_TMF_CMPL;
+ break;
+ case EFCT_SCSI_TMF_FUNCTION_REJECTED:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
+ fcp_rspcode = FCP_TMF_INVALID_LUN;
+ break;
+ case EFCT_SCSI_TMF_SERVICE_DELIVERY:
+ fcp_rspcode = FCP_TMF_FAILED;
+ break;
+ default:
+ fcp_rspcode = FCP_TMF_REJECTED;
+ break;
+ }
+
+ io->hio_type = EFCT_HW_IO_TARGET_RSP;
+
+ io->scsi_tgt_cb = cb;
+ io->scsi_tgt_cb_arg = arg;
+
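+ /* an ABORT_TASK TMF is completed with a BLS BA_ACC rather than
+ * an FCP response
+ */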
+ if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
+ rc = efct_target_send_bls_resp(io, cb, arg);
+ return rc;
+ }
+
+ /* populate the FCP TMF response */
+ fcprsp = io->rspbuf.virt;
+ memset(fcprsp, 0, sizeof(*fcprsp));
+
+ fcprsp->rsp_ext.resp.fr_flags |= FCP_SNS_LEN_VAL;
+
+ if (addl_rsp_info) {
+ memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
+ sizeof(fcprsp->info._fr_resvd));
+ }
+ fcprsp->info.rsp_code = fcp_rspcode;
+
+ io->wire_len = sizeof(*fcprsp);
+
+ fcprsp->rsp_ext.ext.fr_rsp_len =
+ cpu_to_be32(sizeof(struct fcp_resp_rsp_info));
+
+ io->sgl[0].addr = io->rspbuf.phys;
+ io->sgl[0].dif_addr = 0;
+ io->sgl[0].len = io->wire_len;
+ io->sgl_count = 1;
+
+ memset(&io->iparam, 0, sizeof(io->iparam));
+ io->iparam.fcp_tgt.ox_id = io->init_task_tag;
+ io->iparam.fcp_tgt.offset = 0;
+ io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
+ io->iparam.fcp_tgt.timeout = io->timeout;
+
+ rc = efct_scsi_io_dispatch(io, efct_target_io_cb);
+
+ return rc;
+}
+
+static int
+efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
+ u32 ext_status, void *app)
+{
+ struct efct_io *io = app;
+ struct efct *efct;
+ enum efct_scsi_io_status scsi_status;
+ efct_scsi_io_cb_t abort_cb;
+ void *abort_cb_arg;
+
+ efct = io->efct;
+
+ if (!io->abort_cb)
+ goto done;
+
+ abort_cb = io->abort_cb;
+ abort_cb_arg = io->abort_cb_arg;
+
+ io->abort_cb = NULL;
+ io->abort_cb_arg = NULL;
+
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ scsi_status = EFCT_SCSI_STATUS_GOOD;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_NO_XRI:
+ scsi_status = EFCT_SCSI_STATUS_NO_IO;
+ break;
+ case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
+ scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
+ break;
+ default:
+ /* we have seen 0x15 (abort in progress) */
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ break;
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
+ break;
+ default:
+ scsi_status = EFCT_SCSI_STATUS_ERROR;
+ break;
+ }
+ /* invoke callback */
+ abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);
+
+done:
+ /* done with the IO to abort; drop the reference taken in efct_scsi_tgt_abort_io() */
+ kref_put(&io->io_to_abort->ref, io->io_to_abort->release);
+
+ efct_io_pool_io_free(efct->xport->io_pool, io);
+
+ efct_scsi_check_pending(efct);
+ return 0;
+}
+
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
+{
+ struct efct *efct;
+ struct efct_xport *xport;
+ int rc;
+ struct efct_io *abort_io = NULL;
+
+ efct = io->efct;
+ xport = efct->xport;
+
+ /* take a reference on IO being aborted */
+ if (kref_get_unless_zero(&io->ref) == 0) {
+ /* command no longer active */
+ scsi_io_printf(io, "command no longer active\n");
+ return -EIO;
+ }
+
+ /*
+ * allocate a new IO to send the abort request. Use
+ * efct_io_pool_io_alloc() directly, as we need an IO object that
+ * will not fail allocation due to allocations being disabled
+ * (as they can be in efct_scsi_io_alloc())
+ */
+ abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
+ if (!abort_io) {
+ atomic_add_return(1, &xport->io_alloc_failed_count);
+ kref_put(&io->ref, io->release);
+ return -EIO;
+ }
+
+ /* Save the target server callback and argument */
+ /* set generic fields */
+ abort_io->cmd_tgt = true;
+ abort_io->node = io->node;
+
+ /* set type and abort-specific fields */
+ abort_io->io_type = EFCT_IO_TYPE_ABORT;
+ abort_io->display_name = "tgt_abort";
+ abort_io->io_to_abort = io;
+ abort_io->send_abts = false;
+ abort_io->abort_cb = cb;
+ abort_io->abort_cb_arg = arg;
+
+ /* now dispatch IO */
+ rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
+ if (rc)
+ kref_put(&io->ref, io->release);
+ return rc;
+}
+
+void
+efct_scsi_io_complete(struct efct_io *io)
+{
+ if (io->io_free) {
+ efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
+ io->tag);
+ return;
+ }
+
+ scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
+ kref_put(&io->ref, io->release);
+}
diff --git a/drivers/scsi/elx/efct/efct_scsi.h b/drivers/scsi/elx/efct/efct_scsi.h
new file mode 100644
index 000000000000..b04faffa3984
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_scsi.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_SCSI_H__)
+#define __EFCT_SCSI_H__
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+/* efct_scsi_rcv_cmd() efct_scsi_rcv_tmf() flags */
+#define EFCT_SCSI_CMD_DIR_IN (1 << 0)
+#define EFCT_SCSI_CMD_DIR_OUT (1 << 1)
+#define EFCT_SCSI_CMD_SIMPLE (1 << 2)
+#define EFCT_SCSI_CMD_HEAD_OF_QUEUE (1 << 3)
+#define EFCT_SCSI_CMD_ORDERED (1 << 4)
+#define EFCT_SCSI_CMD_UNTAGGED (1 << 5)
+#define EFCT_SCSI_CMD_ACA (1 << 6)
+#define EFCT_SCSI_FIRST_BURST_ERR (1 << 7)
+#define EFCT_SCSI_FIRST_BURST_ABORTED (1 << 8)
+
+/* efct_scsi_send_rd_data/recv_wr_data/send_resp flags */
+#define EFCT_SCSI_LAST_DATAPHASE (1 << 0)
+#define EFCT_SCSI_NO_AUTO_RESPONSE (1 << 1)
+#define EFCT_SCSI_LOW_LATENCY (1 << 2)
+
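+ /* response codes 0x70-0x73 (fixed and descriptor format sense
+ * data) all have the 0x70 bits set
+ */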
+#define EFCT_SCSI_SNS_BUF_VALID(sense) ((sense) && \
+ (0x70 == (((const u8 *)(sense))[0] & 0x70)))
+
+#define EFCT_SCSI_WQ_STEERING_SHIFT 16
+#define EFCT_SCSI_WQ_STEERING_MASK (0xf << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CLASS (0 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_REQUEST (1 << EFCT_SCSI_WQ_STEERING_SHIFT)
+#define EFCT_SCSI_WQ_STEERING_CPU (2 << EFCT_SCSI_WQ_STEERING_SHIFT)
+
+#define EFCT_SCSI_WQ_CLASS_SHIFT (20)
+#define EFCT_SCSI_WQ_CLASS_MASK (0xf << EFCT_SCSI_WQ_CLASS_SHIFT)
+#define EFCT_SCSI_WQ_CLASS(x) ((x & EFCT_SCSI_WQ_CLASS_MASK) << \
+ EFCT_SCSI_WQ_CLASS_SHIFT)
+
+#define EFCT_SCSI_WQ_CLASS_LOW_LATENCY 1
+
+struct efct_scsi_cmd_resp {
+ u8 scsi_status;
+ u16 scsi_status_qualifier;
+ u8 *response_data;
+ u32 response_data_length;
+ u8 *sense_data;
+ u32 sense_data_length;
+ int residual;
+ u32 response_wire_length;
+};
+
+struct efct_vport {
+ struct efct *efct;
+ bool is_vport;
+ struct fc_host_statistics fc_host_stats;
+ struct Scsi_Host *shost;
+ struct fc_vport *fc_vport;
+ u64 npiv_wwpn;
+ u64 npiv_wwnn;
+};
+
+/* Status values returned by IO callbacks */
+enum efct_scsi_io_status {
+ EFCT_SCSI_STATUS_GOOD = 0,
+ EFCT_SCSI_STATUS_ABORTED,
+ EFCT_SCSI_STATUS_ERROR,
+ EFCT_SCSI_STATUS_DIF_GUARD_ERR,
+ EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR,
+ EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR,
+ EFCT_SCSI_STATUS_PROTOCOL_CRC_ERROR,
+ EFCT_SCSI_STATUS_NO_IO,
+ EFCT_SCSI_STATUS_ABORT_IN_PROGRESS,
+ EFCT_SCSI_STATUS_CHECK_RESPONSE,
+ EFCT_SCSI_STATUS_COMMAND_TIMEOUT,
+ EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED,
+ EFCT_SCSI_STATUS_SHUTDOWN,
+ EFCT_SCSI_STATUS_NEXUS_LOST,
+};
+
+struct efct_node;
+struct efct_io;
+struct efc_node;
+struct efc_nport;
+
+/* Callback used by send_rd_data(), recv_wr_data(), send_resp() */
+typedef int (*efct_scsi_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ u32 flags, void *arg);
+
+/* Callback used by send_rd_io(), send_wr_io() */
+typedef int (*efct_scsi_rsp_io_cb_t)(struct efct_io *io,
+ enum efct_scsi_io_status status,
+ struct efct_scsi_cmd_resp *rsp,
+ u32 flags, void *arg);
+
+/* efct_scsi_cb_t flags */
+#define EFCT_SCSI_IO_CMPL (1 << 0)
+/* IO completed, response sent */
+#define EFCT_SCSI_IO_CMPL_RSP_SENT (1 << 1)
+#define EFCT_SCSI_IO_ABORTED (1 << 2)
+
+/* efct_scsi_recv_tmf() request values */
+enum efct_scsi_tmf_cmd {
+ EFCT_SCSI_TMF_ABORT_TASK = 1,
+ EFCT_SCSI_TMF_QUERY_TASK_SET,
+ EFCT_SCSI_TMF_ABORT_TASK_SET,
+ EFCT_SCSI_TMF_CLEAR_TASK_SET,
+ EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT,
+ EFCT_SCSI_TMF_LOGICAL_UNIT_RESET,
+ EFCT_SCSI_TMF_CLEAR_ACA,
+ EFCT_SCSI_TMF_TARGET_RESET,
+};
+
+/* efct_scsi_send_tmf_resp() response values */
+enum efct_scsi_tmf_resp {
+ EFCT_SCSI_TMF_FUNCTION_COMPLETE = 1,
+ EFCT_SCSI_TMF_FUNCTION_SUCCEEDED,
+ EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND,
+ EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER,
+ EFCT_SCSI_TMF_SERVICE_DELIVERY,
+};
+
+struct efct_scsi_sgl {
+ uintptr_t addr;
+ uintptr_t dif_addr;
+ size_t len;
+};
+
+enum efct_scsi_io_role {
+ EFCT_SCSI_IO_ROLE_ORIGINATOR,
+ EFCT_SCSI_IO_ROLE_RESPONDER,
+};
+
+struct efct_io *
+efct_scsi_io_alloc(struct efct_node *node);
+void efct_scsi_io_free(struct efct_io *io);
+struct efct_io *efct_io_get_instance(struct efct *efct, u32 index);
+
+int efct_scsi_tgt_driver_init(void);
+int efct_scsi_tgt_driver_exit(void);
+int efct_scsi_tgt_new_device(struct efct *efct);
+int efct_scsi_tgt_del_device(struct efct *efct);
+int
+efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport);
+void
+efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport);
+
+int
+efct_scsi_new_initiator(struct efc *efc, struct efc_node *node);
+
+enum efct_scsi_del_initiator_reason {
+ EFCT_SCSI_INITIATOR_DELETED,
+ EFCT_SCSI_INITIATOR_MISSING,
+};
+
+int
+efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason);
+void
+efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb, u32 cdb_len,
+ u32 flags);
+int
+efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
+ struct efct_io *abortio, u32 flags);
+int
+efct_scsi_send_rd_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_recv_wr_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
+ u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_resp(struct efct_io *io, u32 flags,
+ struct efct_scsi_cmd_resp *rsp, efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_send_tmf_resp(struct efct_io *io, enum efct_scsi_tmf_resp rspcode,
+ u8 addl_rsp_info[3], efct_scsi_io_cb_t cb, void *arg);
+int
+efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg);
+
+void efct_scsi_io_complete(struct efct_io *io);
+
+int efct_scsi_reg_fc_transport(void);
+void efct_scsi_release_fc_transport(void);
+int efct_scsi_new_device(struct efct *efct);
+void efct_scsi_del_device(struct efct *efct);
+void _efct_scsi_io_free(struct kref *arg);
+
+int
+efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost);
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev);
+
+int efct_scsi_io_dispatch(struct efct_io *io, void *cb);
+int efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb);
+void efct_scsi_check_pending(struct efct *efct);
+struct efct_io *
+efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr);
+
+#endif /* __EFCT_SCSI_H__ */
diff --git a/drivers/scsi/elx/efct/efct_unsol.c b/drivers/scsi/elx/efct/efct_unsol.c
new file mode 100644
index 000000000000..e6addab66a60
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+#define frame_printf(efct, hdr, fmt, ...) \
+ do { \
+ char s_id_text[16]; \
+ efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \
+ s_id_text, sizeof(s_id_text)); \
+ efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \
+ ntoh24((hdr)->fh_d_id), s_id_text, \
+ (hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \
+ be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \
+ } while (0)
+
+static struct efct_node *
+efct_node_find(struct efct *efct, u32 port_id, u32 node_id)
+{
+ struct efct_node *node;
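+ /* lookup key: port_id in the upper 32 bits, node_id in the lower 32 */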
+ u64 id = (u64)port_id << 32 | node_id;
+
+ /*
+ * During node shutdown, the lookup entry is removed first,
+ * before the backend is notified, so no new IOs will be allowed
+ */
+ /* Find a target node, given s_id and d_id */
+ node = xa_load(&efct->lookup, id);
+ if (node)
+ kref_get(&node->ref);
+
+ return node;
+}
+
+static int
+efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq)
+{
+ struct efct_node *node;
+ struct fc_frame_header *hdr;
+ u32 s_id, d_id;
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS))
+ return -EIO;
+
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct,
+ "Node not found, drop cmd d_id:%x s_id:%x\n",
+ d_id, s_id);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+ }
+
+ efct_dispatch_fcp_cmd(node, seq);
+ } else {
+ node = efct_node_find(efct, d_id, s_id);
+ if (!node) {
+ efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n",
+ d_id, s_id);
+ return -EIO;
+ }
+
+ efc_log_err(efct, "Received ABTS for Node:%p\n", node);
+ efct_node_recv_abts_frame(node, seq);
+ }
+
+ kref_put(&node->ref, node->release);
+ efct_hw_sequence_free(&efct->hw, seq);
+ return 0;
+}
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = arg;
+
+ /* Process FCP command */
+ if (!efct_dispatch_frame(efct, seq))
+ return 0;
+
+ /* Forward frame to discovery lib */
+ efc_dispatch_frame(efct->efcport, seq);
+ return 0;
+}
+
+static int
+efct_fc_tmf_rejected_cb(struct efct_io *io,
+ enum efct_scsi_io_status scsi_status,
+ u32 flags, void *arg)
+{
+ efct_scsi_io_free(io);
+ return 0;
+}
+
+static void
+efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun)
+{
+ u32 i;
+ struct {
+ u32 mask;
+ enum efct_scsi_tmf_cmd cmd;
+ } tmflist[] = {
+ {FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET},
+ {FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET},
+ {FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET},
+ {FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET},
+ {FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} };
+
+ io->exp_xfer_len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tmflist); i++) {
+ if (tmflist[i].mask & tm_flags) {
+ io->tmf_cmd = tmflist[i].cmd;
+ efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(tmflist)) {
+ /* Not handled */
+ efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
+ efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED,
+ NULL, efct_fc_tmf_rejected_cb, NULL);
+ }
+}
+
+static int
+efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq)
+{
+ /*
+ * If we received less than FCP_CMND_IU bytes, assume that the frame is
+ * corrupted in some way and drop it.
+ * This was seen when jamming the FCTL
+ * fill bytes field.
+ */
+ if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) {
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+
+ efc_log_debug(efct,
+ "drop ox_id %04x payload (%zd) less than (%zd)\n",
+ be16_to_cpu(fchdr->fh_ox_id),
+ seq->payload->dma.len, sizeof(struct fcp_cmnd));
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd,
+ struct fc_frame_header *fchdr, bool sit)
+{
+ io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id);
+ /* note, tgt_task_tag, hw_tag set when HW io is allocated */
+ io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl);
+ io->transferred = 0;
+
+ /* The upper 7 bits of CS_CTL are the frame priority through the SAN.
+ * Our assertion here is that the priority given to the frame
+ * containing the FCP cmd should be the priority given to ALL frames
+ * in that IO. Thus we need to save the incoming CS_CTL here.
+ */
+ if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17)
+ io->cs_ctl = fchdr->fh_cs_ctl;
+ else
+ io->cs_ctl = 0;
+
+ io->seq_init = sit;
+}
+
+static u32
+efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd)
+{
+ u32 flags = 0;
+
+ switch (cmnd->fc_pri_ta & FCP_PTA_MASK) {
+ case FCP_PTA_SIMPLE:
+ flags |= EFCT_SCSI_CMD_SIMPLE;
+ break;
+ case FCP_PTA_HEADQ:
+ flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE;
+ break;
+ case FCP_PTA_ORDERED:
+ flags |= EFCT_SCSI_CMD_ORDERED;
+ break;
+ case FCP_PTA_ACA:
+ flags |= EFCT_SCSI_CMD_ACA;
+ break;
+ }
+ if (cmnd->fc_flags & FCP_CFL_WRDATA)
+ flags |= EFCT_SCSI_CMD_DIR_IN;
+ if (cmnd->fc_flags & FCP_CFL_RDDATA)
+ flags |= EFCT_SCSI_CMD_DIR_OUT;
+
+ return flags;
+}
+
+static void
+efct_sframe_common_send_cb(void *arg, u8 *cqe, int status)
+{
+ struct efct_hw_send_frame_context *ctx = arg;
+ struct efct_hw *hw = ctx->hw;
+
+ /* Free WQ completion callback */
+ efct_hw_reqtag_free(hw, ctx->wqcb);
+
+ /* Free sequence */
+ efct_hw_sequence_free(hw, ctx->seq);
+}
+
+static int
+efct_sframe_common_send(struct efct_node *node,
+ struct efc_hw_sequence *seq,
+ enum fc_rctl r_ctl, u32 f_ctl,
+ u8 type, void *payload, u32 payload_len)
+{
+ struct efct *efct = node->efct;
+ struct efct_hw *hw = &efct->hw;
+ int rc = 0;
+ struct fc_frame_header *req_hdr = seq->header->dma.virt;
+ struct fc_frame_header hdr;
+ struct efct_hw_send_frame_context *ctx;
+
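+ /* the sequence payload DMA buffer is reused as a scratch heap
+ * for the send frame context and the response payload below
+ */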
+ u32 heap_size = seq->payload->dma.size;
+ uintptr_t heap_phys_base = seq->payload->dma.phys;
+ u8 *heap_virt_base = seq->payload->dma.virt;
+ u32 heap_offset = 0;
+
+ /* Build the FC header reusing the RQ header DMA buffer */
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.fh_r_ctl = r_ctl;
+ /* send it back to whoever sent it to us */
+ memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id));
+ memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id));
+ hdr.fh_type = type;
+ hton24(hdr.fh_f_ctl, f_ctl);
+ hdr.fh_ox_id = req_hdr->fh_ox_id;
+ hdr.fh_rx_id = req_hdr->fh_rx_id;
+ hdr.fh_cs_ctl = 0;
+ hdr.fh_df_ctl = 0;
+ hdr.fh_seq_cnt = 0;
+ hdr.fh_parm_offset = 0;
+
+ /*
+ * send_frame_seq_id is an atomic; we just let it increment,
+ * storing only the low 8 bits in hdr.fh_seq_id
+ */
+ hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
+ hdr.fh_seq_id--;
+
+ /* Allocate and fill in the send frame request context */
+ ctx = (void *)(heap_virt_base + heap_offset);
+ heap_offset += sizeof(*ctx);
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Save sequence */
+ ctx->seq = seq;
+
+ /* Allocate a response payload DMA buffer from the heap */
+ ctx->payload.phys = heap_phys_base + heap_offset;
+ ctx->payload.virt = heap_virt_base + heap_offset;
+ ctx->payload.size = payload_len;
+ ctx->payload.len = payload_len;
+ heap_offset += payload_len;
+ if (heap_offset > heap_size) {
+ efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
+ heap_offset, heap_size);
+ return -EIO;
+ }
+
+ /* Copy the payload in */
+ memcpy(ctx->payload.virt, payload, payload_len);
+
+ /* Send */
+ rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3,
+ FC_EOF_T, &ctx->payload, ctx,
+ efct_sframe_common_send_cb, ctx);
+ if (rc)
+ efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc);
+
+ return rc;
+}
+
+static int
+efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
+ void *rsp, u32 rsp_len)
+{
+ return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
+ FC_FC_EX_CTX |
+ FC_FC_LAST_SEQ |
+ FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT,
+ FC_TYPE_FCP,
+ rsp, rsp_len);
+}
+
+static int
+efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fcp_resp_with_ext fcprsp;
+ struct fcp_cmnd *fcpcmd = seq->payload->dma.virt;
+ int rc = 0;
+ unsigned long flags = 0;
+ struct efct *efct = node->efct;
+
+ /* construct task set full or busy response */
+ memset(&fcprsp, 0, sizeof(fcprsp));
+ spin_lock_irqsave(&node->active_ios_lock, flags);
+ fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
+ SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL;
+ spin_unlock_irqrestore(&node->active_ios_lock, flags);
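+ /* no data was transferred; report the entire fc_dl as the residual */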
+ *((u32 *)&fcprsp.ext.fr_resid) = be32_to_cpu(fcpcmd->fc_dl);
+
+ /* send it using send_frame */
+ rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
+ if (rc)
+ efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc);
+
+ return rc;
+}
+
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *fchdr = seq->header->dma.virt;
+ struct fcp_cmnd *cmnd = NULL;
+ struct efct_io *io = NULL;
+ u32 lun;
+
+ if (!seq->payload) {
+ efc_log_err(efct, "Sequence payload is NULL.\n");
+ return -EIO;
+ }
+
+ cmnd = seq->payload->dma.virt;
+
+ /* perform FCP_CMND validation check(s) */
+ if (efct_validate_fcp_cmd(efct, seq))
+ return -EIO;
+
+ lun = scsilun_to_int(&cmnd->fc_lun);
+ if (lun == U32_MAX)
+ return -EIO;
+
+ io = efct_scsi_io_alloc(node);
+ if (!io) {
+ int rc;
+
+ /* Use SEND_FRAME to send task set full or busy */
+ rc = efct_sframe_send_task_set_full_or_busy(node, seq);
+ if (rc)
+ efc_log_err(efct, "Failed to send busy task: %d\n", rc);
+
+ return rc;
+ }
+
+ io->hw_priv = seq->hw_priv;
+
+ io->app_id = 0;
+
+ /* RQ pair, if we got here, SIT=1 */
+ efct_populate_io_fcp_cmd(io, cmnd, fchdr, true);
+
+ if (cmnd->fc_tm_flags) {
+ efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun);
+ } else {
+ u32 flags = efct_get_flags_fcp_cmd(cmnd);
+
+ if (cmnd->fc_flags & FCP_CFL_LEN_MASK) {
+ efc_log_err(efct, "Additional CDB not supported\n");
+ return -EIO;
+ }
+ /*
+ * Can return failure for things like task set full and UAs;
+ * there is no need to treat rc != 0 as a dropped frame
+ */
+ efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb,
+ sizeof(cmnd->fc_cdb), flags);
+ }
+
+ return 0;
+}
+
+static int
+efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr)
+{
+ struct efct_node *node = io->node;
+ struct efct *efct = io->efct;
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+ u16 rx_id = be16_to_cpu(hdr->fh_rx_id);
+ struct efct_io *abortio;
+
+ /* Find IO and attempt to take a reference on it */
+ abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);
+
+ if (abortio) {
+ /* Got a reference on the IO. Hold it until backend
+ * is notified below
+ */
+ efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
+ ox_id, rx_id);
+
+ /*
+ * Save the ox_id for the ABTS as the init_task_tag in our
+ * manufactured TMF IO object
+ */
+ io->display_name = "abts";
+ io->init_task_tag = ox_id;
+ /* don't set tgt_task_tag, don't want to confuse with XRI */
+
+ /*
+ * Save the rx_id from the ABTS as it is needed for the
+ * BLS response, regardless of the IO context's rx_id
+ */
+ io->abort_rx_id = rx_id;
+
+ /* Call target server command abort */
+ io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK;
+ efct_scsi_recv_tmf(io, abortio->tgt_io.lun,
+ EFCT_SCSI_TMF_ABORT_TASK, abortio, 0);
+
+ /*
+ * Backend will have taken an additional
+ * reference on the IO if needed;
+ * done with current reference.
+ */
+ kref_put(&abortio->ref, abortio->release);
+ } else {
+ /*
+ * Either the IO was not found or it was freed between
+ * finding it and attempting to take the reference.
+ */
+ efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
+ ox_id);
+
+ /* Send a BA_RJT */
+ efct_bls_send_rjt(io, hdr);
+ }
+ return 0;
+}
+
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
+{
+ struct efct *efct = node->efct;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efct_io *io = NULL;
+
+ node->abort_cnt++;
+ io = efct_scsi_io_alloc(node);
+ if (io) {
+ io->hw_priv = seq->hw_priv;
+ /* If we got this far, SIT=1 */
+ io->seq_init = 1;
+
+ /* fill out generic fields */
+ io->efct = efct;
+ io->node = node;
+ io->cmd_tgt = true;
+
+ efct_process_abts(io, seq->header->dma.virt);
+ } else {
+ efc_log_err(efct,
+ "SCSI IO allocation failed for ABTS received ");
+ efc_log_err(efct, "s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
+ ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id),
+ be16_to_cpu(hdr->fh_ox_id),
+ be16_to_cpu(hdr->fh_rx_id));
+ }
+
+ return 0;
+}
diff --git a/drivers/scsi/elx/efct/efct_unsol.h b/drivers/scsi/elx/efct/efct_unsol.h
new file mode 100644
index 000000000000..16d1e3ba1833
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_unsol.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_UNSOL_H__)
+#define __EFCT_UNSOL_H__
+
+int
+efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq);
+int
+efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq);
+int
+efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __EFCT_UNSOL_H__ */
diff --git a/drivers/scsi/elx/efct/efct_xport.c b/drivers/scsi/elx/efct/efct_xport.c
new file mode 100644
index 000000000000..9495cedcc0b9
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.c
@@ -0,0 +1,1111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efct_driver.h"
+#include "efct_unsol.h"
+
+static struct dentry *efct_debugfs_root;
+static atomic_t efct_debugfs_count;
+
+static struct scsi_host_template efct_template = {
+ .module = THIS_MODULE,
+ .name = EFCT_DRIVER_NAME,
+ .supported_mode = MODE_TARGET,
+};
+
+/* globals */
+static struct fc_function_template efct_xport_functions;
+static struct fc_function_template efct_vport_functions;
+
+static struct scsi_transport_template *efct_xport_fc_tt;
+static struct scsi_transport_template *efct_vport_fc_tt;
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct)
+{
+ struct efct_xport *xport;
+
+ xport = kzalloc(sizeof(*xport), GFP_KERNEL);
+ if (!xport)
+ return xport;
+
+ xport->efct = efct;
+ return xport;
+}
+
+static int
+efct_xport_init_debugfs(struct efct *efct)
+{
+ /* Setup efct debugfs root directory */
+ if (!efct_debugfs_root) {
+ efct_debugfs_root = debugfs_create_dir("efct", NULL);
+ atomic_set(&efct_debugfs_count, 0);
+ }
+
+ /* Create a directory for sessions in root */
+ if (!efct->sess_debugfs_dir) {
+ efct->sess_debugfs_dir = debugfs_create_dir("sessions",
+ efct_debugfs_root);
+ if (IS_ERR(efct->sess_debugfs_dir)) {
+ efc_log_err(efct,
+ "failed to create debugfs entry for sessions\n");
+ goto debugfs_fail;
+ }
+ atomic_inc(&efct_debugfs_count);
+ }
+
+ return 0;
+
+debugfs_fail:
+ return -EIO;
+}
+
+static void efct_xport_delete_debugfs(struct efct *efct)
+{
+ /* Remove session debugfs directory */
+ debugfs_remove(efct->sess_debugfs_dir);
+ efct->sess_debugfs_dir = NULL;
+ atomic_dec(&efct_debugfs_count);
+
+ if (atomic_read(&efct_debugfs_count) == 0) {
+ /* remove root debugfs directory */
+ debugfs_remove(efct_debugfs_root);
+ efct_debugfs_root = NULL;
+ }
+}
+
+int
+efct_xport_attach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc;
+
+ rc = efct_hw_setup(&efct->hw, efct, efct->pci);
+ if (rc) {
+ efc_log_err(efct, "%s: Can't setup hardware\n", efct->desc);
+ return rc;
+ }
+
+ efct_hw_parse_filter(&efct->hw, (void *)efct->filter_def);
+
+ xport->io_pool = efct_io_pool_create(efct, efct->hw.config.n_sgl);
+ if (!xport->io_pool) {
+ efc_log_err(efct, "Can't allocate IO pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+efct_xport_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters, void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+
+ complete(&result->stats.done);
+}
+
+static void
+efct_xport_async_link_stats_cb(int status, u32 num_counters,
+ struct efct_hw_link_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.link_stats.link_failure_error_count =
+ counters[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter;
+ result->stats.link_stats.loss_of_sync_error_count =
+ counters[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter;
+ result->stats.link_stats.primitive_sequence_error_count =
+ counters[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter;
+ result->stats.link_stats.invalid_transmission_word_error_count =
+ counters[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter;
+ result->stats.link_stats.crc_error_count =
+ counters[EFCT_HW_LINK_STAT_CRC_COUNT].counter;
+}
+
+static void
+efct_xport_async_host_stats_cb(int status, u32 num_counters,
+ struct efct_hw_host_stat_counts *counters,
+ void *arg)
+{
+ union efct_xport_stats_u *result = arg;
+
+ result->stats.host_stats.transmit_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter;
+ result->stats.host_stats.receive_kbyte_count =
+ counters[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter;
+ result->stats.host_stats.transmit_frame_count =
+ counters[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter;
+ result->stats.host_stats.receive_frame_count =
+ counters[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter;
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct);
+
+static void
+efct_xport_stats_timer_cb(struct timer_list *t)
+{
+ struct efct_xport *xport = from_timer(xport, t, stats_timer);
+ struct efct *efct = xport->efct;
+
+ efct_xport_config_stats_timer(efct);
+}
+
+static void
+efct_xport_config_stats_timer(struct efct *efct)
+{
+ u32 timeout = 3 * 1000;
+ struct efct_xport *xport = NULL;
+
+ if (!efct) {
+ pr_err("%s: failed to locate EFCT device\n", __func__);
+ return;
+ }
+
+ xport = efct->xport;
+ efct_hw_get_link_stats(&efct->hw, 0, 0, 0,
+ efct_xport_async_link_stats_cb,
+ &xport->fc_xport_stats);
+ efct_hw_get_host_stats(&efct->hw, 0, efct_xport_async_host_stats_cb,
+ &xport->fc_xport_stats);
+
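+ /* re-arm the timer; statistics are refreshed every 3 seconds */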
+ timer_setup(&xport->stats_timer,
+ &efct_xport_stats_timer_cb, 0);
+ mod_timer(&xport->stats_timer,
+ jiffies + msecs_to_jiffies(timeout));
+}
+
+int
+efct_xport_initialize(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+ int rc = 0;
+
+ /* Initialize io lists */
+ spin_lock_init(&xport->io_pending_lock);
+ INIT_LIST_HEAD(&xport->io_pending_list);
+ atomic_set(&xport->io_active_count, 0);
+ atomic_set(&xport->io_pending_count, 0);
+ atomic_set(&xport->io_total_free, 0);
+ atomic_set(&xport->io_total_pending, 0);
+ atomic_set(&xport->io_alloc_failed_count, 0);
+ atomic_set(&xport->io_pending_recursing, 0);
+
+ rc = efct_hw_init(&efct->hw);
+ if (rc) {
+ efc_log_err(efct, "efct_hw_init failure\n");
+ goto out;
+ }
+
+ rc = efct_scsi_tgt_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize target\n");
+ goto hw_init_out;
+ }
+
+ rc = efct_scsi_new_device(efct);
+ if (rc) {
+ efc_log_err(efct, "failed to initialize initiator\n");
+ goto tgt_dev_out;
+ }
+
+ /* Get FC link and host statistics periodically */
+ efct_xport_config_stats_timer(efct);
+
+ efct_xport_init_debugfs(efct);
+
+ return rc;
+
+tgt_dev_out:
+ efct_scsi_tgt_del_device(efct);
+
+hw_init_out:
+ efct_hw_teardown(&efct->hw);
+out:
+ return rc;
+}
+
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result)
+{
+ int rc = 0;
+ struct efct *efct = NULL;
+ union efct_xport_stats_u value;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_CONFIG_PORT_STATUS:
+ if (xport->configured_link_state == 0) {
+ /*
+ * Initial state is offline. configured_link_state is
+ * set to online explicitly when port is brought online
+ */
+ xport->configured_link_state = EFCT_XPORT_PORT_OFFLINE;
+ }
+ result->value = xport->configured_link_state;
+ break;
+
+ case EFCT_XPORT_PORT_STATUS:
+ /* Determine port status based on link speed. */
+ value.value = efct_hw_get_link_speed(&efct->hw);
+ if (value.value == 0)
+ result->value = EFCT_XPORT_PORT_OFFLINE;
+ else
+ result->value = EFCT_XPORT_PORT_ONLINE;
+ break;
+
+ case EFCT_XPORT_LINK_SPEED:
+ result->value = efct_hw_get_link_speed(&efct->hw);
+ break;
+
+ case EFCT_XPORT_LINK_STATISTICS:
+ memcpy((void *)result, &efct->xport->fc_xport_stats,
+ sizeof(union efct_xport_stats_u));
+ break;
+ case EFCT_XPORT_LINK_STAT_RESET: {
+ /* Create a completion to synchronize the stat reset process */
+ init_completion(&result->stats.done);
+
+ /* First reset the link stats */
+ rc = efct_hw_get_link_stats(&efct->hw, 0, 1, 1,
+ efct_xport_link_stats_cb, result);
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Undefined failure */
+ efc_log_debug(efct, "sem wait failed\n");
+ rc = -EIO;
+ break;
+ }
+
+ /* Next reset the host stats */
+ rc = efct_hw_get_host_stats(&efct->hw, 1,
+ efct_xport_host_stats_cb, result);
+
+ if (rc)
+ break;
+
+ /* Wait for completion to be signaled when the cmd completes */
+ if (wait_for_completion_interruptible(&result->stats.done)) {
+ /* Undefined failure */
+ efc_log_debug(efct, "sem wait failed\n");
+ rc = -EIO;
+ break;
+ }
+ break;
+ }
+ default:
+ rc = -EIO;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+efct_get_link_supported_speeds(struct efct *efct)
+{
+ u32 supported_speeds = 0;
+ u32 link_module_type, i;
+ struct {
+ u32 lmt_speed;
+ u32 speed;
+ } supported_speed_list[] = {
+ {SLI4_LINK_MODULE_TYPE_1GB, FC_PORTSPEED_1GBIT},
+ {SLI4_LINK_MODULE_TYPE_2GB, FC_PORTSPEED_2GBIT},
+ {SLI4_LINK_MODULE_TYPE_4GB, FC_PORTSPEED_4GBIT},
+ {SLI4_LINK_MODULE_TYPE_8GB, FC_PORTSPEED_8GBIT},
+ {SLI4_LINK_MODULE_TYPE_16GB, FC_PORTSPEED_16GBIT},
+ {SLI4_LINK_MODULE_TYPE_32GB, FC_PORTSPEED_32GBIT},
+ {SLI4_LINK_MODULE_TYPE_64GB, FC_PORTSPEED_64GBIT},
+ {SLI4_LINK_MODULE_TYPE_128GB, FC_PORTSPEED_128GBIT},
+ };
+
+ link_module_type = sli_get_lmt(&efct->hw.sli);
+
+ /* populate link supported speeds */
+ for (i = 0; i < ARRAY_SIZE(supported_speed_list); i++) {
+ if (link_module_type & supported_speed_list[i].lmt_speed)
+ supported_speeds |= supported_speed_list[i].speed;
+ }
+
+ return supported_speeds;
+}
+
+int
+efct_scsi_new_device(struct efct *efct)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return -ENOMEM;
+ }
+
+ /* save shost to initiator-client context */
+ efct->shost = shost;
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+
+ /*
+ * Set initial can_queue value to the max SCSI IOs. This is the maximum
+ * global queue depth (as opposed to the per-LUN queue depth,
+ * .cmd_per_lun). This may need to be adjusted for I+T mode.
+ */
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+ /*
+ * can only accept (from mid-layer) as many SGEs as we've
+ * pre-registered
+ */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_xport_fc_tt;
+ efc_log_debug(efct, "transport template=%p\n", efct_xport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, &efct->pci->dev,
+ &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return -EIO;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model,
+ efct->hw.sli.fw_name[0], EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+
+ fc_host_node_name(shost) = efct_get_wwnn(&efct->hw);
+ fc_host_port_name(shost) = efct_get_wwpn(&efct->hw);
+ fc_host_max_npiv_vports(shost) = 128;
+
+ return 0;
+}
+
+struct scsi_transport_template *
+efct_attach_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_xport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+struct scsi_transport_template *
+efct_attach_vport_fc_transport(void)
+{
+ struct scsi_transport_template *efct_fc_template = NULL;
+
+ efct_fc_template = fc_attach_transport(&efct_vport_functions);
+
+ if (!efct_fc_template)
+ pr_err("failed to attach EFCT with fc transport\n");
+
+ return efct_fc_template;
+}
+
+int
+efct_scsi_reg_fc_transport(void)
+{
+ /* attach to the appropriate scsi_transport_* module */
+ efct_xport_fc_tt = efct_attach_fc_transport();
+ if (!efct_xport_fc_tt) {
+ pr_err("%s: failed to attach to scsi_transport_*", __func__);
+ return -EIO;
+ }
+
+ efct_vport_fc_tt = efct_attach_vport_fc_transport();
+ if (!efct_vport_fc_tt) {
+ pr_err("%s: failed to attach to scsi_transport_*", __func__);
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void
+efct_scsi_release_fc_transport(void)
+{
+ /* detach from scsi_transport_* */
+ efct_release_fc_transport(efct_xport_fc_tt);
+ efct_xport_fc_tt = NULL;
+ if (efct_vport_fc_tt)
+ efct_release_fc_transport(efct_vport_fc_tt);
+
+ efct_vport_fc_tt = NULL;
+}
+
+void
+efct_xport_detach(struct efct_xport *xport)
+{
+ struct efct *efct = xport->efct;
+
+ /* free resources associated with target-server and initiator-client */
+ efct_scsi_tgt_del_device(efct);
+
+ efct_scsi_del_device(efct);
+
+ /* Shutdown FC statistics timer */
+ if (timer_pending(&xport->stats_timer))
+ del_timer(&xport->stats_timer);
+
+ efct_hw_teardown(&efct->hw);
+
+ efct_xport_delete_debugfs(efct);
+}
+
+static void
+efct_xport_domain_free_cb(struct efc *efc, void *arg)
+{
+ struct completion *done = arg;
+
+ complete(done);
+}
+
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...)
+{
+ u32 rc = 0;
+ struct efct *efct = NULL;
+ va_list argp;
+
+ efct = xport->efct;
+
+ switch (cmd) {
+ case EFCT_XPORT_PORT_ONLINE: {
+ /* Bring the port on-line */
+ rc = efct_hw_port_control(&efct->hw, EFCT_HW_PORT_INIT, 0,
+ NULL, NULL);
+ if (rc)
+ efc_log_err(efct,
+ "%s: Can't init port\n", efct->desc);
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+ case EFCT_XPORT_PORT_OFFLINE: {
+ if (efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN, 0,
+ NULL, NULL))
+ efc_log_err(efct, "port shutdown failed\n");
+ else
+ xport->configured_link_state = cmd;
+ break;
+ }
+
+ case EFCT_XPORT_SHUTDOWN: {
+ struct completion done;
+ unsigned long timeout;
+
+ /* if a PHYSDEV reset was performed (e.g. hw dump), it affects
+ * all PCI functions; an orderly shutdown won't work,
+ * so just force free
+ */
+ if (sli_reset_required(&efct->hw.sli)) {
+ struct efc_domain *domain = efct->efcport->domain;
+
+ if (domain)
+ efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST,
+ domain);
+ } else {
+ efct_hw_port_control(&efct->hw, EFCT_HW_PORT_SHUTDOWN,
+ 0, NULL, NULL);
+ }
+
+ init_completion(&done);
+
+ efc_register_domain_free_cb(efct->efcport,
+ efct_xport_domain_free_cb, &done);
+
+ efc_log_debug(efct, "Waiting %d seconds for domain shutdown\n",
+ (EFC_SHUTDOWN_TIMEOUT_USEC / 1000000));
+
+ timeout = usecs_to_jiffies(EFC_SHUTDOWN_TIMEOUT_USEC);
+ if (!wait_for_completion_timeout(&done, timeout)) {
+ efc_log_err(efct, "Domain shutdown timed out!!\n");
+ WARN_ON(1);
+ }
+
+ efc_register_domain_free_cb(efct->efcport, NULL, NULL);
+
+ /* Free up any saved virtual ports */
+ efc_vport_del_all(efct->efcport);
+ break;
+ }
+
+ /*
+ * Set wwnn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWNN_SET: {
+ u64 wwnn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwnn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWNN %016llx\n", wwnn);
+ xport->req_wwnn = wwnn;
+
+ break;
+ }
+ /*
+ * Set wwpn for the port. This will be used instead of the default
+ * provided by FW.
+ */
+ case EFCT_XPORT_WWPN_SET: {
+ u64 wwpn;
+
+ /* Retrieve arguments */
+ va_start(argp, cmd);
+ wwpn = va_arg(argp, uint64_t);
+ va_end(argp);
+
+ efc_log_debug(efct, " WWPN %016llx\n", wwpn);
+ xport->req_wwpn = wwpn;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ return rc;
+}
+
+void
+efct_xport_free(struct efct_xport *xport)
+{
+ if (xport) {
+ efct_io_pool_free(xport->io_pool);
+
+ kfree(xport);
+ }
+}
+
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template)
+{
+ if (transport_template)
+ pr_err("releasing transport layer\n");
+
+ /* Releasing FC transport */
+ fc_release_transport(transport_template);
+}
+
+static void
+efct_xport_remove_host(struct Scsi_Host *shost)
+{
+ fc_remove_host(shost);
+}
+
+void
+efct_scsi_del_device(struct efct *efct)
+{
+ if (!efct->shost)
+ return;
+
+ efc_log_debug(efct, "Unregistering with Transport Layer\n");
+ efct_xport_remove_host(efct->shost);
+ efc_log_debug(efct, "Unregistering with SCSI Midlayer\n");
+ scsi_remove_host(efct->shost);
+ scsi_host_put(efct->shost);
+ efct->shost = NULL;
+}
+
+static void
+efct_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ struct efc_nport *nport;
+
+ if (efc->domain && efc->domain->nport) {
+ nport = efc->domain->nport;
+ fc_host_port_id(shost) = nport->fc_id;
+ }
+}
+
+static void
+efct_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ int type = FC_PORTTYPE_UNKNOWN;
+
+ if (efc->domain && efc->domain->nport) {
+ if (efc->domain->is_loop) {
+ type = FC_PORTTYPE_LPORT;
+ } else {
+ struct efc_nport *nport = efc->domain->nport;
+
+ if (nport->is_vport)
+ type = FC_PORTTYPE_NPIV;
+ else if (nport->topology == EFC_NPORT_TOPO_P2P)
+ type = FC_PORTTYPE_PTP;
+ else if (nport->topology == EFC_NPORT_TOPO_UNKNOWN)
+ type = FC_PORTTYPE_UNKNOWN;
+ else
+ type = FC_PORTTYPE_NPORT;
+ }
+ }
+ fc_host_port_type(shost) = type;
+}
+
+static void
+efct_get_host_vport_type(struct Scsi_Host *shost)
+{
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+}
+
+static void
+efct_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u status;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_PORT_STATUS, &status);
+ if ((!rc) && (status.value == EFCT_XPORT_PORT_ONLINE))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+}
+
+static void
+efct_get_host_speed(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+ union efct_xport_stats_u speed;
+ u32 fc_speed = FC_PORTSPEED_UNKNOWN;
+ int rc;
+
+ if (!efc->domain || !efc->domain->nport) {
+ fc_host_speed(shost) = fc_speed;
+ return;
+ }
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_SPEED, &speed);
+ if (!rc) {
+ switch (speed.value) {
+ case 1000:
+ fc_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case 2000:
+ fc_speed = FC_PORTSPEED_2GBIT;
+ break;
+ case 4000:
+ fc_speed = FC_PORTSPEED_4GBIT;
+ break;
+ case 8000:
+ fc_speed = FC_PORTSPEED_8GBIT;
+ break;
+ case 10000:
+ fc_speed = FC_PORTSPEED_10GBIT;
+ break;
+ case 16000:
+ fc_speed = FC_PORTSPEED_16GBIT;
+ break;
+ case 32000:
+ fc_speed = FC_PORTSPEED_32GBIT;
+ break;
+ case 64000:
+ fc_speed = FC_PORTSPEED_64GBIT;
+ break;
+ case 128000:
+ fc_speed = FC_PORTSPEED_128GBIT;
+ break;
+ }
+ }
+
+ fc_host_speed(shost) = fc_speed;
+}
+
+static void
+efct_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ struct efc *efc = efct->efcport;
+
+ if (efc->domain) {
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)
+ efc->domain->flogi_service_params;
+
+ fc_host_fabric_name(shost) = be64_to_cpu(sp->fl_wwnn);
+ }
+}
+
+static struct fc_host_statistics *
+efct_get_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ union efct_xport_stats_u stats;
+ struct efct_xport *xport = efct->xport;
+ int rc = 0;
+
+ rc = efct_xport_status(xport, EFCT_XPORT_LINK_STATISTICS, &stats);
+ if (rc) {
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+ return NULL;
+ }
+
+ vport->fc_host_stats.loss_of_sync_count =
+ stats.stats.link_stats.loss_of_sync_error_count;
+ vport->fc_host_stats.link_failure_count =
+ stats.stats.link_stats.link_failure_error_count;
+ vport->fc_host_stats.prim_seq_protocol_err_count =
+ stats.stats.link_stats.primitive_sequence_error_count;
+ vport->fc_host_stats.invalid_tx_word_count =
+ stats.stats.link_stats.invalid_transmission_word_error_count;
+ vport->fc_host_stats.invalid_crc_count =
+ stats.stats.link_stats.crc_error_count;
+ /* mbox returns kbyte count so we need to convert to words */
+ vport->fc_host_stats.tx_words =
+ stats.stats.host_stats.transmit_kbyte_count * 256;
+ /* mbox returns kbyte count so we need to convert to words */
+ vport->fc_host_stats.rx_words =
+ stats.stats.host_stats.receive_kbyte_count * 256;
+ vport->fc_host_stats.tx_frames =
+ stats.stats.host_stats.transmit_frame_count;
+ vport->fc_host_stats.rx_frames =
+ stats.stats.host_stats.receive_frame_count;
+
+ vport->fc_host_stats.fcp_input_requests =
+ xport->fcp_stats.input_requests;
+ vport->fc_host_stats.fcp_output_requests =
+ xport->fcp_stats.output_requests;
+ vport->fc_host_stats.fcp_output_megabytes =
+ xport->fcp_stats.output_bytes >> 20;
+ vport->fc_host_stats.fcp_input_megabytes =
+ xport->fcp_stats.input_bytes >> 20;
+ vport->fc_host_stats.fcp_control_requests =
+ xport->fcp_stats.control_requests;
+
+ return &vport->fc_host_stats;
+}
+
+static void
+efct_reset_stats(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport = (struct efct_vport *)shost->hostdata;
+ struct efct *efct = vport->efct;
+ /* argument has no purpose for this action */
+ union efct_xport_stats_u dummy;
+ int rc;
+
+ rc = efct_xport_status(efct->xport, EFCT_XPORT_LINK_STAT_RESET, &dummy);
+ if (rc)
+ pr_err("efct_xport_status returned non 0 - %d\n", rc);
+}
+
+static int
+efct_issue_lip(struct Scsi_Host *shost)
+{
+ struct efct_vport *vport =
+ shost ? (struct efct_vport *)shost->hostdata : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+
+ if (!shost || !vport || !efct) {
+ pr_err("%s: shost=%p vport=%p efct=%p\n", __func__,
+ shost, vport, efct);
+ return -EPERM;
+ }
+
+ /*
+ * Bring the link down gracefully then re-init the link.
+ * The firmware will re-initialize the Fibre Channel interface as
+ * required. It does not issue a LIP.
+ */
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_OFFLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_OFFLINE failed\n");
+
+ if (efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE))
+ efc_log_debug(efct, "EFCT_XPORT_PORT_ONLINE failed\n");
+
+ return 0;
+}
+
+struct efct_vport *
+efct_scsi_new_vport(struct efct *efct, struct device *dev)
+{
+ struct Scsi_Host *shost = NULL;
+ int error = 0;
+ struct efct_vport *vport = NULL;
+
+ shost = scsi_host_alloc(&efct_template, sizeof(*vport));
+ if (!shost) {
+ efc_log_err(efct, "failed to allocate Scsi_Host struct\n");
+ return NULL;
+ }
+
+ /* save efct information to shost LLD-specific space */
+ vport = (struct efct_vport *)shost->hostdata;
+ vport->efct = efct;
+ vport->is_vport = true;
+
+ shost->can_queue = efct->hw.config.n_io;
+ shost->max_cmd_len = 16; /* 16-byte CDBs */
+ shost->max_id = 0xffff;
+ shost->max_lun = 0xffffffff;
+
+ /* can only accept (from mid-layer) as many SGEs as we've pre-registered */
+ shost->sg_tablesize = sli_get_max_sgl(&efct->hw.sli);
+
+ /* attach FC Transport template to shost */
+ shost->transportt = efct_vport_fc_tt;
+ efc_log_debug(efct, "vport transport template=%p\n",
+ efct_vport_fc_tt);
+
+ /* get pci_dev structure and add host to SCSI ML */
+ error = scsi_add_host_with_dma(shost, dev, &efct->pci->dev);
+ if (error) {
+ efc_log_debug(efct, "failed scsi_add_host_with_dma\n");
+ return NULL;
+ }
+
+ /* Set symbolic name for host port */
+ snprintf(fc_host_symbolic_name(shost),
+ sizeof(fc_host_symbolic_name(shost)),
+ "Emulex %s FV%s DV%s", efct->model, efct->hw.sli.fw_name[0],
+ EFCT_DRIVER_VERSION);
+
+ /* Set host port supported classes */
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+
+ fc_host_supported_speeds(shost) = efct_get_link_supported_speeds(efct);
+ vport->shost = shost;
+
+ return vport;
+}
+
+int efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost)
+{
+ if (shost) {
+ efc_log_debug(efct,
+ "Unregistering vport with Transport Layer\n");
+ efct_xport_remove_host(shost);
+ efc_log_debug(efct, "Unregistering vport with SCSI Midlayer\n");
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+ return 0;
+ }
+ return -EIO;
+}
+
+static int
+efct_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport ? fc_vport->shost : NULL;
+ struct efct_vport *pport = shost ?
+ (struct efct_vport *)shost->hostdata :
+ NULL;
+ struct efct *efct = pport ? pport->efct : NULL;
+ struct efct_vport *vport = NULL;
+
+ if (!fc_vport || !shost || !efct)
+ goto fail;
+
+ vport = efct_scsi_new_vport(efct, &fc_vport->dev);
+ if (!vport) {
+ efc_log_err(efct, "failed to create vport\n");
+ goto fail;
+ }
+
+ vport->fc_vport = fc_vport;
+ vport->npiv_wwpn = fc_vport->port_name;
+ vport->npiv_wwnn = fc_vport->node_name;
+ fc_host_node_name(vport->shost) = vport->npiv_wwnn;
+ fc_host_port_name(vport->shost) = vport->npiv_wwpn;
+ *(struct efct_vport **)fc_vport->dd_data = vport;
+
+ return 0;
+
+fail:
+ return -EIO;
+}
+
+static int
+efct_vport_delete(struct fc_vport *fc_vport)
+{
+ struct efct_vport *vport = *(struct efct_vport **)fc_vport->dd_data;
+ struct Scsi_Host *shost = vport ? vport->shost : NULL;
+ struct efct *efct = vport ? vport->efct : NULL;
+ int rc;
+
+ rc = efct_scsi_del_vport(efct, shost);
+
+ if (rc)
+ pr_err("%s: vport delete failed\n", __func__);
+
+ return rc;
+}
+
+static int
+efct_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ return 0;
+}
+
+static struct fc_function_template efct_xport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_port_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ .vport_disable = efct_vport_disable,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+ .vport_create = efct_vport_create,
+ .vport_delete = efct_vport_delete,
+};
+
+static struct fc_function_template efct_vport_functions = {
+ .get_host_port_id = efct_get_host_port_id,
+ .get_host_port_type = efct_get_host_vport_type,
+ .get_host_port_state = efct_get_host_port_state,
+ .get_host_speed = efct_get_host_speed,
+ .get_host_fabric_name = efct_get_host_fabric_name,
+
+ .get_fc_host_stats = efct_get_stats,
+ .reset_fc_host_stats = efct_reset_stats,
+
+ .issue_fc_host_lip = efct_issue_lip,
+
+ /* allocation lengths for host-specific data */
+ .dd_fcrport_size = sizeof(struct efct_rport_data),
+ .dd_fcvport_size = 128, /* should be sizeof(...) */
+
+ /* remote port fixed attributes */
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_rport_dev_loss_tmo = 1,
+
+ /* target dynamic attributes */
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+
+ /* host fixed attributes */
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+
+ /* host dynamic attributes */
+ .show_host_port_id = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ /* active_fc4s is shown but doesn't change (thus no get function) */
+ .show_host_active_fc4s = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_symbolic_name = 1,
+};
diff --git a/drivers/scsi/elx/efct/efct_xport.h b/drivers/scsi/elx/efct/efct_xport.h
new file mode 100644
index 000000000000..89f3c20ecb59
--- /dev/null
+++ b/drivers/scsi/elx/efct/efct_xport.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFCT_XPORT_H__)
+#define __EFCT_XPORT_H__
+
+enum efct_xport_ctrl {
+ EFCT_XPORT_PORT_ONLINE = 1,
+ EFCT_XPORT_PORT_OFFLINE,
+ EFCT_XPORT_SHUTDOWN,
+ EFCT_XPORT_POST_NODE_EVENT,
+ EFCT_XPORT_WWNN_SET,
+ EFCT_XPORT_WWPN_SET,
+};
+
+enum efct_xport_status {
+ EFCT_XPORT_PORT_STATUS,
+ EFCT_XPORT_CONFIG_PORT_STATUS,
+ EFCT_XPORT_LINK_SPEED,
+ EFCT_XPORT_IS_SUPPORTED_LINK_SPEED,
+ EFCT_XPORT_LINK_STATISTICS,
+ EFCT_XPORT_LINK_STAT_RESET,
+ EFCT_XPORT_IS_QUIESCED
+};
+
+struct efct_xport_link_stats {
+ bool rec;
+ bool gec;
+ bool w02of;
+ bool w03of;
+ bool w04of;
+ bool w05of;
+ bool w06of;
+ bool w07of;
+ bool w08of;
+ bool w09of;
+ bool w10of;
+ bool w11of;
+ bool w12of;
+ bool w13of;
+ bool w14of;
+ bool w15of;
+ bool w16of;
+ bool w17of;
+ bool w18of;
+ bool w19of;
+ bool w20of;
+ bool w21of;
+ bool clrc;
+ bool clof1;
+ u32 link_failure_error_count;
+ u32 loss_of_sync_error_count;
+ u32 loss_of_signal_error_count;
+ u32 primitive_sequence_error_count;
+ u32 invalid_transmission_word_error_count;
+ u32 crc_error_count;
+ u32 primitive_sequence_event_timeout_count;
+ u32 elastic_buffer_overrun_error_count;
+ u32 arbitration_fc_al_timeout_count;
+ u32 advertised_receive_bufftor_to_buffer_credit;
+ u32 current_receive_buffer_to_buffer_credit;
+ u32 advertised_transmit_buffer_to_buffer_credit;
+ u32 current_transmit_buffer_to_buffer_credit;
+ u32 received_eofa_count;
+ u32 received_eofdti_count;
+ u32 received_eofni_count;
+ u32 received_soff_count;
+ u32 received_dropped_no_aer_count;
+ u32 received_dropped_no_available_rpi_resources_count;
+ u32 received_dropped_no_available_xri_resources_count;
+};
+
+struct efct_xport_host_stats {
+ bool cc;
+ u32 transmit_kbyte_count;
+ u32 receive_kbyte_count;
+ u32 transmit_frame_count;
+ u32 receive_frame_count;
+ u32 transmit_sequence_count;
+ u32 receive_sequence_count;
+ u32 total_exchanges_originator;
+ u32 total_exchanges_responder;
+ u32 receive_p_bsy_count;
+ u32 receive_f_bsy_count;
+ u32 dropped_frames_due_to_no_rq_buffer_count;
+ u32 empty_rq_timeout_count;
+ u32 dropped_frames_due_to_no_xri_count;
+ u32 empty_xri_pool_count;
+};
+
+struct efct_xport_host_statistics {
+ struct completion done;
+ struct efct_xport_link_stats link_stats;
+ struct efct_xport_host_stats host_stats;
+};
+
+union efct_xport_stats_u {
+ u32 value;
+ struct efct_xport_host_statistics stats;
+};
+
+struct efct_xport_fcp_stats {
+ u64 input_bytes;
+ u64 output_bytes;
+ u64 input_requests;
+ u64 output_requests;
+ u64 control_requests;
+};
+
+struct efct_xport {
+ struct efct *efct;
+ /* wwpn requested by user for primary nport */
+ u64 req_wwpn;
+ /* wwnn requested by user for primary nport */
+ u64 req_wwnn;
+
+ /* Nodes */
+ /* number of allocated nodes */
+ u32 nodes_count;
+ /* used to track how often IO pool is empty */
+ atomic_t io_alloc_failed_count;
+ /* array of pointers to nodes */
+ struct efc_node **nodes;
+
+ /* IO pool and counts */
+ /* pointer to IO pool */
+ struct efct_io_pool *io_pool;
+ /* lock for io_pending_list */
+ spinlock_t io_pending_lock;
+ /* list of IOs waiting for HW resources
+ * lock: xport->io_pending_lock
+ * link: efct_io_s->io_pending_link
+ */
+ struct list_head io_pending_list;
+ /* count of total IOs allocated */
+ atomic_t io_total_alloc;
+ /* count of total IOs freed */
+ atomic_t io_total_free;
+ /* count of total IOs that were pended */
+ atomic_t io_total_pending;
+ /* count of active IOs */
+ atomic_t io_active_count;
+ /* count of pending IOs */
+ atomic_t io_pending_count;
+ /* non-zero if efct_scsi_check_pending is executing */
+ atomic_t io_pending_recursing;
+
+ /* Port */
+ /* requested link state */
+ u32 configured_link_state;
+
+ /* Timer for Statistics */
+ struct timer_list stats_timer;
+ union efct_xport_stats_u fc_xport_stats;
+ struct efct_xport_fcp_stats fcp_stats;
+};
+
+struct efct_rport_data {
+ struct efc_node *node;
+};
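+
+/*
+ * A minimal sketch of how this per-rport data is reached, given that the
+ * fc_function_template's dd_fcrport_size is set to
+ * sizeof(struct efct_rport_data) so the FC transport class reserves the
+ * space behind each fc_rport ("rport" below is a hypothetical
+ * struct fc_rport pointer):
+ *
+ *   struct efct_rport_data *rdata = rport->dd_data;
+ *
+ *   rdata->node = node;
+ *
+ * where "node" stands in for the efc_node being bound to the rport.
+ */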
+
+struct efct_xport *
+efct_xport_alloc(struct efct *efct);
+int
+efct_xport_attach(struct efct_xport *xport);
+int
+efct_xport_initialize(struct efct_xport *xport);
+void
+efct_xport_detach(struct efct_xport *xport);
+int
+efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...);
+int
+efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
+ union efct_xport_stats_u *result);
+void
+efct_xport_free(struct efct_xport *xport);
+
+struct scsi_transport_template *efct_attach_fc_transport(void);
+struct scsi_transport_template *efct_attach_vport_fc_transport(void);
+void
+efct_release_fc_transport(struct scsi_transport_template *transport_template);
+
+#endif /* __EFCT_XPORT_H__ */
diff --git a/drivers/scsi/elx/include/efc_common.h b/drivers/scsi/elx/include/efc_common.h
new file mode 100644
index 000000000000..8d57f69ace0a
--- /dev/null
+++ b/drivers/scsi/elx/include/efc_common.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_COMMON_H__
+#define __EFC_COMMON_H__
+
+#include <linux/pci.h>
+
+struct efc_dma {
+ void *virt;
+ void *alloc;
+ dma_addr_t phys;
+
+ size_t size;
+ size_t len;
+ struct pci_dev *pdev;
+};
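+
+/*
+ * A minimal sketch (not part of this patch) of how an efc_dma buffer is
+ * typically filled in before being handed to a mailbox command, mirroring
+ * the helpers in libefc; "pdev" stands in for the owning pci_dev that is
+ * also recorded in the structure, and 112 is the service-parameter buffer
+ * size used by libefc:
+ *
+ *   struct efc_dma dma;
+ *
+ *   dma.size = 112;
+ *   dma.virt = dma_alloc_coherent(&pdev->dev, dma.size, &dma.phys,
+ *                                 GFP_DMA);
+ *   if (!dma.virt)
+ *           return -ENOMEM;
+ *
+ * and released again with dma_free_coherent() once the command completes.
+ */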
+
+#define efc_log_crit(efc, fmt, args...) \
+ dev_crit(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_err(efc, fmt, args...) \
+ dev_err(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_warn(efc, fmt, args...) \
+ dev_warn(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_info(efc, fmt, args...) \
+ dev_info(&((efc)->pci)->dev, fmt, ##args)
+
+#define efc_log_debug(efc, fmt, args...) \
+ dev_dbg(&((efc)->pci)->dev, fmt, ##args)
+
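+/*
+ * Usage sketch: any object that carries a suitable "pci" back-pointer
+ * (struct efc, for one, does) can be passed as the first argument, e.g.
+ *
+ *   efc_log_err(efc, "REG_VPI failed, rc=%d\n", rc);
+ *
+ * which expands to dev_err() against the underlying PCI device so the
+ * message carries the usual device prefix.
+ */
+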
+#endif /* __EFC_COMMON_H__ */
diff --git a/drivers/scsi/elx/libefc/efc.h b/drivers/scsi/elx/libefc/efc.h
new file mode 100644
index 000000000000..927016283f41
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_H__
+#define __EFC_H__
+
+#include "../include/efc_common.h"
+#include "efclib.h"
+#include "efc_sm.h"
+#include "efc_cmds.h"
+#include "efc_domain.h"
+#include "efc_nport.h"
+#include "efc_node.h"
+#include "efc_fabric.h"
+#include "efc_device.h"
+#include "efc_els.h"
+
+#define EFC_MAX_REMOTE_NODES 2048
+#define NODE_SPARAMS_SIZE 256
+
+enum efc_scsi_del_initiator_reason {
+ EFC_SCSI_INITIATOR_DELETED,
+ EFC_SCSI_INITIATOR_MISSING,
+};
+
+enum efc_scsi_del_target_reason {
+ EFC_SCSI_TARGET_DELETED,
+ EFC_SCSI_TARGET_MISSING,
+};
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
+#define domain_sm_trace(domain) \
+ efc_log_debug(domain->efc, "[domain:%s] %-20s %-20s\n", \
+ domain->display_name, __func__, efc_sm_event_name(evt)) \
+
+#define domain_trace(domain, fmt, ...) \
+ efc_log_debug(domain->efc, \
+ "[%s]" fmt, domain->display_name, ##__VA_ARGS__) \
+
+#define node_sm_trace() \
+ efc_log_debug(node->efc, "[%s] %-20s %-20s\n", \
+ node->display_name, __func__, efc_sm_event_name(evt)) \
+
+#define nport_sm_trace(nport) \
+ efc_log_debug(nport->efc, \
+ "[%s] %-20s\n", nport->display_name, efc_sm_event_name(evt)) \
+
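+/*
+ * Usage sketch: the trace macros above expect the usual state-machine
+ * handler locals ("node", "evt", and so on) to be in scope, so a handler
+ * body typically looks roughly like the following
+ * ("__efc_example_state" is a made-up name used only for illustration):
+ *
+ *   static void __efc_example_state(struct efc_sm_ctx *ctx,
+ *                                   enum efc_sm_event evt, void *arg)
+ *   {
+ *           struct efc_node *node = ctx->app;
+ *
+ *           node_sm_trace();
+ *           ...
+ *   }
+ */
+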
+#endif /* __EFC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
new file mode 100644
index 000000000000..37e6697d86b8
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efclib.h"
+#include "../libefc_sli/sli4.h"
+#include "efc_cmds.h"
+#include "efc_sm.h"
+
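+/*
+ * The helpers in this file share one asynchronous pattern: a sli_cmd_*()
+ * routine formats a mailbox command into a SLI4_BMBX_SIZE buffer,
+ * efc->tt.issue_mbox_rqst() queues it to the hardware, and the supplied
+ * callback runs on completion to report the matching success or failure
+ * event to the state machines.  Most failure paths free the associated
+ * SLI resources and post the corresponding FAIL event immediately.
+ */
+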
+static void
+efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Clear the nport attached flag */
+ nport->attached = false;
+
+ /* Free the service parameters buffer */
+ if (nport->dma.virt) {
+ dma_free_coherent(&efc->pci->dev, nport->dma.size,
+ nport->dma.virt, nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+
+ efc_nport_cb(efc, evt, nport);
+}
+
+static int
+efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
+{
+ struct efc *efc = nport->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
+ nport->indicator, status, le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ int evt = EFC_EVT_NPORT_FREE_OK;
+ int rc;
+
+ rc = efc_nport_get_mbox_status(nport, mqe, status);
+ if (rc)
+ evt = EFC_EVT_NPORT_FREE_FAIL;
+
+ efc_nport_free_resources(nport, evt, mqe);
+ return rc;
+}
+
+static void
+efc_nport_free_unreg_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
+ SLI4_UNREG_TYPE_PORT);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_free_unreg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
+ }
+}
+
+static void
+efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
+{
+ struct efc *efc = nport->efc;
+
+ /* Now inform the registered callbacks */
+ efc_nport_cb(efc, evt, nport);
+
+ /* Set the nport attached flag */
+ if (evt == EFC_EVT_NPORT_ATTACH_OK)
+ nport->attached = true;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending)
+ efc_nport_free_unreg_vpi(nport);
+}
+
+static int
+efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_nport_alloc_init_vpi(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* If there is a pending free request, then handle it now */
+ if (nport->free_req_pending) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
+ return;
+ }
+
+ rc = sli_cmd_init_vpi(efc->sli, data,
+ nport->indicator, nport->domain->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_init_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "INIT_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_nport *nport = arg;
+ u8 *payload = NULL;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ payload = nport->dma.virt;
+
+ memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
+ sizeof(nport->sli_wwpn));
+ memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
+ sizeof(nport->sli_wwnn));
+
+ dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
+ nport->dma.phys);
+ memset(&nport->dma, 0, sizeof(struct efc_dma));
+ efc_nport_alloc_init_vpi(nport);
+ return 0;
+}
+
+static void
+efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
+{
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /* Allocate memory for the service parameters */
+ nport->dma.size = EFC_SPARAM_DMA_SZ;
+ nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ nport->dma.size, &nport->dma.phys,
+ GFP_DMA);
+ if (!nport->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = sli_cmd_read_sparm64(efc->sli, data,
+ &nport->dma, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_nport_alloc_read_sparm64_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn)
+{
+ u32 index;
+
+ nport->indicator = U32_MAX;
+ nport->free_req_pending = false;
+
+ if (wwpn)
+ memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));
+
+ /*
+ * Allocate a VPI object for the port and store it in the
+ * indicator field of the port object.
+ */
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
+ &nport->indicator, &index)) {
+ efc_log_err(efc, "VPI allocation failure\n");
+ return -EIO;
+ }
+
+ if (domain) {
+ /*
+ * If the WWPN is NULL, fetch the default
+ * WWPN and WWNN before initializing the VPI
+ */
+ if (!wwpn)
+ efc_nport_alloc_read_sparm64(efc, nport);
+ else
+ efc_nport_alloc_init_vpi(nport);
+ } else if (!wwpn) {
+ /* domain NULL and wwpn NULL */
+ efc_log_err(efc, "need WWN for physical port\n");
+ sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_nport *nport = arg;
+
+ if (efc_nport_get_mbox_status(nport, mqe, status)) {
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!nport) {
+ efc_log_err(efc, "bad param(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ nport->fc_id = fc_id;
+
+ /* register previously-allocated VPI with the device */
+ rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
+ nport->sli_wwpn, nport->indicator,
+ nport->domain->indicator, false);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI format failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ return rc;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_nport_attach_reg_vpi_cb, nport);
+ if (rc) {
+ efc_log_err(efc, "REG_VPI command failure\n");
+ efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
+ }
+
+ return rc;
+}
+
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
+{
+ if (!nport) {
+ efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
+ return -EIO;
+ }
+
+ /* Issue the UNREG_VPI command to free the assigned VPI context */
+ if (nport->attached)
+ efc_nport_free_unreg_vpi(nport);
+ else
+ nport->free_req_pending = true;
+
+ return 0;
+}
+
+static int
+efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
+{
+ struct efc *efc = domain->efc;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
+ domain->indicator, status,
+ le16_to_cpu(hdr->status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void
+efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Free the service parameters buffer */
+ if (domain->dma.virt) {
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+ }
+
+ /* Free the SLI resources */
+ sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);
+
+ efc_domain_cb(efc, evt, domain);
+}
+
+static void
+efc_domain_send_nport_evt(struct efc_domain *domain,
+ int port_evt, int domain_evt, void *data)
+{
+ struct efc *efc = domain->efc;
+
+ /* Send alloc/attach ok to the physical nport */
+ efc_nport_send_evt(domain->nport, port_evt, NULL);
+
+ /* Now inform the registered callbacks */
+ efc_domain_cb(efc, domain_evt, domain);
+}
+
+static int
+efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_OK, mqe);
+ return 0;
+}
+
+static void
+efc_domain_alloc_read_sparm64(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_read_sparm64_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "READ_SPARM64 command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+static int
+efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_alloc_read_sparm64(domain);
+ return 0;
+}
+
+static void
+efc_domain_alloc_init_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_nport *nport = domain->nport;
+ u8 data[SLI4_BMBX_SIZE];
+ int rc;
+
+ /*
+ * For FC, the HW already registered an FCFI.
+ * Copy FCF information into the domain and jump to INIT_VFI.
+ */
+ domain->fcf_indicator = efc->fcfi;
+ rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
+ domain->fcf_indicator, nport->indicator);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI format failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ return;
+ }
+
+ efc_log_debug(efc, "%s issue mbox\n", __func__);
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_alloc_init_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "INIT_VFI command failure\n");
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ALLOC_FAIL, data);
+ }
+}
+
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
+{
+ u32 index;
+
+ if (!domain || !domain->nport) {
+ efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
+ domain, domain ? domain->nport : NULL);
+ return -EIO;
+ }
+
+ /* allocate memory for the service parameters */
+ domain->dma.size = EFC_SPARAM_DMA_SZ;
+ domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
+ domain->dma.size,
+ &domain->dma.phys, GFP_DMA);
+ if (!domain->dma.virt) {
+ efc_log_err(efc, "Failed to allocate DMA memory\n");
+ return -EIO;
+ }
+
+ domain->fcf = fcf;
+ domain->fcf_indicator = U32_MAX;
+ domain->indicator = U32_MAX;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
+ &index)) {
+ efc_log_err(efc, "VFI allocation failure\n");
+
+ dma_free_coherent(&efc->pci->dev,
+ domain->dma.size, domain->dma.virt,
+ domain->dma.phys);
+ memset(&domain->dma, 0, sizeof(struct efc_dma));
+
+ return -EIO;
+ }
+
+ efc_domain_alloc_init_vfi(domain);
+ return 0;
+}
+
+static int
+efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
+ void *arg)
+{
+ struct efc_domain *domain = arg;
+
+ if (efc_domain_get_mbox_status(domain, mqe, status)) {
+ efc_domain_free_resources(domain,
+ EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
+ return -EIO;
+ }
+
+ efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_OK, mqe);
+ return 0;
+}
+
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = 0;
+
+ if (!domain) {
+ efc_log_err(efc, "bad param(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ domain->nport->fc_id = fc_id;
+
+ rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
+ domain->fcf_indicator, domain->dma,
+ domain->nport->indicator, domain->nport->sli_wwpn,
+ domain->nport->fc_id);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_domain_attach_reg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "REG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return rc;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);
+
+ return rc;
+}
+
+static int
+efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_domain *domain = arg;
+ int evt = EFC_HW_DOMAIN_FREE_OK;
+ int rc;
+
+ rc = efc_domain_get_mbox_status(domain, mqe, status);
+ if (rc) {
+ evt = EFC_HW_DOMAIN_FREE_FAIL;
+ rc = -EIO;
+ }
+
+ efc_domain_free_resources(domain, evt, mqe);
+ return rc;
+}
+
+static void
+efc_domain_free_unreg_vfi(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ int rc;
+ u8 data[SLI4_BMBX_SIZE];
+
+ rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
+ SLI4_UNREG_TYPE_DOMAIN);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI format failure\n");
+ goto cleanup;
+ }
+
+ rc = efc->tt.issue_mbox_rqst(efc->base, data,
+ efc_domain_free_unreg_vfi_cb, domain);
+ if (rc) {
+ efc_log_err(efc, "UNREG_VFI command failure\n");
+ goto cleanup;
+ }
+
+ return;
+
+cleanup:
+ efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
+}
+
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
+{
+ if (!domain) {
+ efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
+ return -EIO;
+ }
+
+ efc_domain_free_unreg_vfi(domain);
+ return 0;
+}
+
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport)
+{
+ /* Check for invalid indicator */
+ if (rnode->indicator != U32_MAX) {
+ efc_log_err(efc,
+ "RPI allocation failure addr=%#x rpi=%#x\n",
+ fc_addr, rnode->indicator);
+ return -EIO;
+ }
+
+ /* NULL SLI port indicates an unallocated remote node */
+ rnode->nport = NULL;
+
+ if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
+ &rnode->indicator, &rnode->index)) {
+ efc_log_err(efc, "RPI allocation failure addr=%#x\n",
+ fc_addr);
+ return -EIO;
+ }
+
+ rnode->fc_id = fc_addr;
+ rnode->nport = nport;
+
+ return 0;
+}
+
+static int
+efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_ATTACH_FAIL;
+ } else {
+ rnode->attached = true;
+ evt = EFC_EVT_NODE_ATTACH_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return 0;
+}
+
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms)
+{
+ int rc = -EIO;
+ u8 buf[SLI4_BMBX_SIZE];
+
+ if (!rnode || !sparms) {
+ efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
+ rnode, sparms);
+ return -EIO;
+ }
+
+ /*
+ * The RPI must already have been allocated (i.e. rnode->index is
+ * valid) before the remote node can be registered with REG_RPI.
+ */
+ if (rnode->index == U32_MAX) {
+ efc_log_err(efc, "bad parameter rnode->index invalid\n");
+ return -EIO;
+ }
+
+ /* Update a remote node object with the remote port's service params */
+ if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
+ rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_attach_cb, rnode);
+
+ return rc;
+}
+
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
+{
+ int rc = 0;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (rnode->attached) {
+ efc_log_err(efc, "rnode is still attached\n");
+ return -EIO;
+ }
+ if (rnode->indicator != U32_MAX) {
+ if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
+ rnode->indicator)) {
+ efc_log_err(efc,
+ "RPI free fail RPI %d addr=%#x\n",
+ rnode->indicator, rnode->fc_id);
+ rc = -EIO;
+ } else {
+ rnode->indicator = U32_MAX;
+ rnode->index = U32_MAX;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int
+efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
+{
+ struct efc_remote_node *rnode = arg;
+ struct sli4_mbox_command_header *hdr =
+ (struct sli4_mbox_command_header *)mqe;
+ int evt = EFC_EVT_NODE_FREE_FAIL;
+ int rc = 0;
+
+ if (status || le16_to_cpu(hdr->status)) {
+ efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
+ le16_to_cpu(hdr->status));
+
+ /*
+ * In certain cases, a non-zero MQE status is OK (all must be
+ * true):
+ * - node is attached
+ * - status is 0x1400
+ */
+ if (!rnode->attached ||
+ (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
+ rc = -EIO;
+ }
+
+ if (!rc) {
+ rnode->attached = false;
+ evt = EFC_EVT_NODE_FREE_OK;
+ }
+
+ efc_remote_node_cb(efc, evt, rnode);
+
+ return rc;
+}
+
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
+{
+ u8 buf[SLI4_BMBX_SIZE];
+ int rc = -EIO;
+
+ if (!rnode) {
+ efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
+ return -EIO;
+ }
+
+ if (rnode->nport) {
+ if (!rnode->attached)
+ return -EIO;
+
+ rc = -EIO;
+
+ if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
+ SLI4_RSRC_RPI, U32_MAX))
+ rc = efc->tt.issue_mbox_rqst(efc->base, buf,
+ efc_cmd_node_free_cb, rnode);
+
+ if (rc != 0) {
+ efc_log_err(efc, "UNREG_RPI failed\n");
+ rc = -EIO;
+ }
+ }
+
+ return rc;
+}
diff --git a/drivers/scsi/elx/libefc/efc_cmds.h b/drivers/scsi/elx/libefc/efc_cmds.h
new file mode 100644
index 000000000000..4d353ab04dc3
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_cmds.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_CMDS_H__
+#define __EFC_CMDS_H__
+
+#define EFC_SPARAM_DMA_SZ 112
+int
+efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
+ struct efc_domain *domain, u8 *wwpn);
+int
+efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id);
+int
+efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport);
+int
+efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf);
+int
+efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id);
+int
+efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain);
+int
+efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode);
+int
+efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
+ struct efc_dma *sparms);
+int
+efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
+ struct efc_nport *nport);
+
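+/*
+ * Rough call order (a sketch, not enforced by this header): a domain is
+ * brought up with efc_cmd_domain_alloc() and efc_cmd_domain_attach(),
+ * nports within it with efc_cmd_nport_alloc() and efc_cmd_nport_attach(),
+ * and remote nodes with efc_cmd_node_alloc() and efc_cmd_node_attach();
+ * teardown uses the corresponding free/detach helpers in the reverse
+ * direction, with completions reported asynchronously through the libefc
+ * callbacks.
+ */
+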
+#endif /* __EFC_CMDS_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c
new file mode 100644
index 000000000000..725ca2a23fb2
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.c
@@ -0,0 +1,1603 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * device_sm Node State Machine: Remote Device States
+ */
+
+#include "efc.h"
+#include "efc_device.h"
+#include "efc_fabric.h"
+
+void
+efc_d_send_prli_rsp(struct efc_node *node, u16 ox_id)
+{
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ struct efc *efc = node->efc;
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_PRLI;
+
+ /*
+ * Wait for backend session registration
+ * to complete before sending PRLI resp
+ */
+
+ if (node->init) {
+ efc_log_info(efc, "[%s] found(initiator) WWPN:%s WWNN:%s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_new_node(efc, node);
+ }
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_FAIL, NULL);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_NODE_SESS_REG_OK, NULL);
+}
+
+static void
+__efc_d_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s %-20s\n", node->display_name,
+ funcname, efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_node(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ /*
+ * State is entered when a node sends a delete initiator/target call
+ * to the target-server/initiator-client and needs to wait for that
+ * work to complete.
+ */
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ /*
+ * node has either been detached or is in the process
+ * of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+__efc_d_wait_del_ini_tgt(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ fallthrough;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* These are expected events. */
+ break;
+
+ case EFC_EVT_NODE_DEL_INI_COMPLETE:
+ case EFC_EVT_NODE_DEL_TGT_COMPLETE:
+ efc_node_transition(node, __efc_d_wait_del_node, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+
+ /* assume no wait needed */
+ node->els_io_enabled = false;
+
+ /* make necessary delete upcall(s) */
+ if (node->init && !node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (initiator) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE || rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+
+ } else if (node->targ && !node->init) {
+ efc_log_info(node->efc,
+ "[%s] delete (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ efc_node_transition(node,
+ __efc_d_wait_del_node,
+ NULL);
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+
+ } else if (node->init && node->targ) {
+ efc_log_info(node->efc,
+ "[%s] delete (I+T) WWPN %s WWNN %s\n",
+ node->display_name, node->wwpn, node->wwnn);
+ efc_node_transition(node, __efc_d_wait_del_ini_tgt,
+ NULL);
+ if (node->nport->enable_tgt)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ /* assume no wait needed */
+ rc = EFC_SCSI_CALL_COMPLETE;
+ if (node->nport->enable_ini)
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_DELETED);
+
+ if (rc == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ }
+
+ /* we've initiated the upcalls as needed, now kick off the node
+ * detach to precipitate the aborting of outstanding exchanges
+ * associated with said node
+ *
+ * Beware: if we've made upcall(s), we've already transitioned
+ * to a new state by the time we execute this.
+ * consider doing this before the upcalls?
+ */
+ if (node->attached) {
+ /* issue hw node free; don't care if succeeds right
+ * away or sometime later, will check node->attached
+ * later in shutdown process
+ */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0)
+ node_printf(node,
+ "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+
+ /* if neither initiator nor target, proceed to cleanup */
+ if (!node->init && !node->targ) {
+ /*
+ * node has either been detached or is in
+ * the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+ }
+ break;
+ }
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* Ignore, this can happen if an ELS is
+ * aborted while in a delay/retry state
+ */
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ /* send PLOGI automatically if initiator */
+ efc_node_init_device(node, true);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls)
+{
+ u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
+
+ /* Save the OX_ID for sending LS_ACC sometime later */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE);
+
+ node->ls_acc_oxid = ox_id;
+ node->send_ls_acc = ls;
+ node->ls_acc_did = ntoh24(hdr->fh_d_id);
+}
+
+void
+efc_process_prli_payload(struct efc_node *node, void *prli)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = prli;
+ node->init = (pp->sp.spp_flags & FCP_SPPF_INIT_FCN) != 0;
+ node->targ = (pp->sp.spp_flags & FCP_SPPF_TARG_FCN) != 0;
+}
+
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_d_port_logged_in, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* LOGO response received, send shutdown */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_LOGO,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node_printf(node,
+ "LOGO sent (evt=%s), shutdown node\n",
+ efc_sm_event_name(evt));
+ /* sm: / post explicit logout */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi)
+{
+ node->send_plogi = send_plogi;
+ if ((node->efc->nodedb_mask & EFC_NODEDB_PAUSE_NEW_NODES) &&
+ (node->rnode.fc_id != FC_FID_DOM_MGR)) {
+ node->nodedb_state = __efc_d_init;
+ efc_node_transition(node, __efc_node_paused, NULL);
+ } else {
+ efc_node_transition(node, __efc_d_init, NULL);
+ }
+}
+
+static void
+efc_d_check_plogi_topology(struct efc_node *node, u32 d_id)
+{
+ switch (node->nport->topology) {
+ case EFC_NPORT_TOPO_P2P:
+ /* we're not attached and nport is p2p,
+ * need to attach
+ */
+ efc_domain_attach(node->nport->domain, d_id);
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_FABRIC:
+ /* we're not attached and nport is fabric, domain
+ * attach should have already been requested as part
+ * of the fabric state machine, wait for it
+ */
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ case EFC_NPORT_TOPO_UNKNOWN:
+ /* Two possibilities:
+ * 1. received a PLOGI before our FLOGI has completed
+ * (possible since completion comes in on another
+ * CQ), thus we don't know what we're connected to
+ * yet; transition to a state to wait for the
+ * fabric node to tell us;
+ * 2. PLOGI received before link went down and we
+ * haven't performed domain attach yet.
+ * Note: we cannot distinguish between 1. and 2.
+ * so have to assume PLOGI
+ * was received after link back up.
+ */
+ node_printf(node, "received PLOGI, unknown topology did=0x%x\n",
+ d_id);
+ efc_node_transition(node, __efc_d_wait_topology_notify, NULL);
+ break;
+ default:
+ node_printf(node, "received PLOGI, unexpected topology %d\n",
+ node->nport->topology);
+ }
+}
+
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * This state is entered when a node is instantiated,
+ * either having been discovered from a name services query,
+ * or having received a PLOGI/FLOGI.
+ */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->send_plogi)
+ break;
+ /* only send if we have initiator capability,
+ * and domain is attached
+ */
+ if (node->nport->enable_ini &&
+ node->nport->domain->attached) {
+ efc_send_plogi(node);
+
+ efc_node_transition(node, __efc_d_wait_plogi_rsp, NULL);
+ } else {
+ node_printf(node,
+ "not sending plogi nport.ini=%d, domain attached=%d\n",
+ node->nport->enable_ini,
+ node->nport->domain->attached);
+ }
+ break;
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ int rc;
+
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* domain not attached; several possibilities: */
+ if (!node->nport->domain->attached) {
+ efc_d_check_plogi_topology(node, ntoh24(hdr->fh_d_id));
+ break;
+ }
+
+ /* domain already attached */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_FDISC_RCVD: {
+ __efc_d_common(__func__, ctx, evt, arg);
+ break;
+ }
+
+ case EFC_EVT_FLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ u32 d_id = ntoh24(hdr->fh_d_id);
+
+ /* sm: / save sparams, send FLOGI acc */
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->payload->dma.virt,
+ sizeof(struct fc_els_flogi));
+
+ /* send FC LS_ACC response, override s_id */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+
+ efc_send_flogi_p2p_acc(node, be16_to_cpu(hdr->fh_ox_id), d_id);
+
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node, "p2p failed, shutting down node\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ efc_node_transition(node, __efc_p2p_wait_flogi_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ }
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a link
+ * down; drop and shut node down w/ "explicit logout"
+ * so pending frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* note: problem, we're now expecting an ELS REQ completion
+ * from both the LOGO and PLOGI
+ */
+ if (!node->nport->domain->attached) {
+ /* most likely a frame left over from before a
+ * link down; drop and
+ * shut node down w/ "explicit logout" so pending
+ * frames are processed
+ */
+ node_printf(node, "%s domain not attached, dropping\n",
+ efc_sm_event_name(evt));
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ /* Send LOGO */
+ node_printf(node, "FCP_CMND received, send LOGO\n");
+ if (efc_send_logo(node)) {
+ /*
+ * failed to send LOGO, go ahead and clean up the
+ * node anyway
+ */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ } else {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node,
+ __efc_d_wait_logo_rsp, NULL);
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_PLOGI_RCVD: {
+ /* T, or I+T */
+ /* received PLOGI with svc parms, go ahead and attach node;
+ * when the PLOGI that was sent ultimately completes, it'll
+ * be a no-op
+ *
+ * If there is an outstanding PLOGI sent, can we set a flag
+ * to indicate that we don't want to retry it if it times out?
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+ /* sm: domain->attached / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* sent PLOGI and before completion was seen, received the
+ * PRLI from the remote node (WCQEs and RCQEs come in on
+ * different queues and order of processing cannot be assumed)
+ * Save OXID so PRLI can be sent after the attach and continue
+ * to wait for PLOGI response
+ */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_d_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received, sending reject\n",
+ efc_sm_event_name(evt));
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD, 0);
+
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* Our PLOGI was rejected, this is ok in some cases */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* not logged in yet and outstanding PLOGI so don't send LOGO,
+ * just drop
+ */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /*
+ * Since we've received a PRLI, we have a port login and will
+ * just need to wait for the PLOGI response to do the node
+ * attach and then we can send the LS_ACC for the PRLI. During
+ * this time we may receive FCP_CMNDs (which is possible since
+ * we've already sent a PRLI and our peer may have accepted it).
+ * At this time, we are not waiting on any other unsolicited
+ * frames to continue with the login process. Thus, it will not
+ * hurt to hold frames here.
+ */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ int rc;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
+ enum efc_nport_topology topology =
+ (enum efc_nport_topology)arg;
+
+ WARN_ON(node->nport->domain->attached);
+
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ node_printf(node, "topology notification, topology=%d\n",
+ topology);
+
+ /* At the time the PLOGI was received, the topology was unknown,
+ * so we didn't know which node would perform the domain attach:
+ * 1. The node from which the PLOGI was sent (p2p) or
+ * 2. The node to which the FLOGI was sent (fabric).
+ */
+ if (topology == EFC_NPORT_TOPO_P2P) {
+ /* if this is p2p, need to attach to the domain using
+ * the d_id from the PLOGI received
+ */
+ efc_domain_attach(node->nport->domain,
+ node->ls_acc_did);
+ }
+ /* else, if this is fabric, the domain attach
+ * should be performed by the fabric node (node sending FLOGI);
+ * just wait for attach to complete
+ */
+
+ efc_node_transition(node, __efc_d_wait_domain_attach, NULL);
+ break;
+ }
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ WARN_ON(!node->nport->domain->attached);
+ node_printf(node, "domain attach ok\n");
+ /* sm: / efc_node_attach */
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node,
+ EFC_EVT_NODE_ATTACH_FAIL, NULL);
+
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PLOGI: {
+ /* sm: send_plogi_acc is set / send PLOGI acc */
+ /* Normal case for T, or I+T */
+ efc_send_plogi_acc(node, node->ls_acc_oxid);
+ efc_node_transition(node, __efc_d_wait_plogi_acc_cmpl,
+ NULL);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node, node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set / send PLOGI acc */
+ efc_node_transition(node,
+ __efc_d_port_logged_in, NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* Handle shutdown events */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_EXPLICIT_LOGO;
+ efc_node_transition(node, __efc_d_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_IMPLICIT_LOGO;
+ efc_node_transition(node,
+ __efc_d_wait_attach_evt_shutdown, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_node_transition(node, __efc_d_initiate_shutdown, NULL);
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* Normal case for I or I+T */
+ if (node->nport->enable_ini &&
+ !(node->rnode.fc_id != FC_FID_DOM_MGR)) {
+ /* sm: if enable_ini / send PRLI */
+ efc_send_prli(node);
+ /* can now expect ELS_REQ_OK/FAIL/RJT */
+ }
+ break;
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* Normal case for T or I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+ /* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_d_send_prli_rsp(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_NODE_SESS_REG_OK:
+ if (node->send_ls_acc == EFC_NODE_SEND_LS_ACC_PRLI)
+ efc_send_prli_acc(node, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_NODE_SESS_REG_FAIL:
+ efc_send_ls_rjt(node, node->ls_acc_oxid, ELS_RJT_UNAB,
+ ELS_EXPL_UNSUPR, 0);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
+ /* Normal case for I or I+T */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process PRLI payload */
+ efc_process_prli_payload(node, cbdata->els_rsp.virt);
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
+ /* I, I+T, assume some link failure, shutdown node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT: {
+ /* PRLI rejected by the remote node.
+ * Normal for I, I+T (connected to an I).
+ * The remote node doesn't want to be a target; stay here and
+ * wait for a PRLI from the remote node in case it really does
+ * want to connect to us as a target.
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PRLI,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK: {
+ /* Normal for T, I+T: the target-server rejected the process
+ * login. This is received only in the case where we sent an
+ * LS_RJT for the PRLI, so do nothing.
+ * (Note: as T only, we could shut down the node here.)
+ */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc,
+ * post implicit logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /* Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt),
+ node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* sm: / post explicit logout */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ break;
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ if (evt != EFC_EVT_FCP_CMD_RCVD)
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node->fcp_enabled = true;
+ if (node->targ) {
+ efc_log_info(efc,
+ "[%s] found (target) WWPN %s WWNN %s\n",
+ node->display_name,
+ node->wwpn, node->wwnn);
+ if (node->nport->enable_ini)
+ efc->tt.scsi_new_node(efc, node);
+ }
+ break;
+
+ case EFC_EVT_EXIT:
+ node->fcp_enabled = false;
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters,
+ * and send ACC
+ */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
+ break;
+ }
+
+ case EFC_EVT_PRLI_RCVD: {
+ /* T, I+T: remote initiator is slow to get started */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp sp;
+ } *pp;
+
+ pp = cbdata->payload->dma.virt;
+ if (pp->sp.spp_type != FC_TYPE_FCP) {
+ /* Only FCP is supported */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_UNSUPR, 0);
+ break;
+ }
+
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_prli_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_PRLO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send PRLO acc */
+ efc_send_prlo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ /* need implicit logout? */
+ break;
+ }
+
+ case EFC_EVT_LOGO_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+
+ case EFC_EVT_ADISC_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* sm: / send ADISC acc */
+ efc_send_adisc_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ break;
+ }
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: / process ABTS */
+ efc_log_err(efc, "Unexpected event:%s\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ break;
+
+ case EFC_EVT_NODE_MISSING:
+ if (node->nport->enable_rscn)
+ efc_node_transition(node, __efc_d_device_gone, NULL);
+
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ /* T, or I+T, PRLI accept completed ok */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /* T, or I+T, PRLI accept failed to complete */
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node_printf(node, "Failed to send PRLI LS_ACC\n");
+ break;
+
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ int rc = EFC_SCSI_CALL_COMPLETE;
+ int rc_2 = EFC_SCSI_CALL_COMPLETE;
+ static const char * const labels[] = {
+ "none", "initiator", "target", "initiator+target"
+ };
+
+ efc_log_info(efc, "[%s] missing (%s) WWPN %s WWNN %s\n",
+ node->display_name,
+ labels[(node->targ << 1) | (node->init)],
+ node->wwpn, node->wwnn);
+
+ switch (efc_node_get_enable(node)) {
+ case EFC_NODE_ENABLE_T_TO_T:
+ case EFC_NODE_ENABLE_I_TO_T:
+ case EFC_NODE_ENABLE_IT_TO_T:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_I:
+ case EFC_NODE_ENABLE_I_TO_I:
+ case EFC_NODE_ENABLE_IT_TO_I:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_T_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_I_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ case EFC_NODE_ENABLE_IT_TO_IT:
+ rc = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_INITIATOR_MISSING);
+ rc_2 = efc->tt.scsi_del_node(efc, node,
+ EFC_SCSI_TARGET_MISSING);
+ break;
+
+ default:
+ rc = EFC_SCSI_CALL_COMPLETE;
+ break;
+ }
+
+ if (rc == EFC_SCSI_CALL_COMPLETE &&
+ rc_2 == EFC_SCSI_CALL_COMPLETE)
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+
+ break;
+ }
+ case EFC_EVT_NODE_REFOUND:
+ /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */
+
+ /* reauthenticate with PLOGI/PRLI */
+ /* efc_node_transition(node, __efc_d_discovered, NULL); */
+
+ /* reauthenticate with ADISC */
+ /* sm: / send ADISC */
+ efc_send_adisc(node);
+ efc_node_transition(node, __efc_d_wait_adisc_rsp, NULL);
+ break;
+
+ case EFC_EVT_PLOGI_RCVD: {
+ /* sm: / save sparams, set send_plogi_acc, post implicit
+ * logout
+ * Save plogi parameters
+ */
+ efc_node_save_sparms(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PLOGI);
+
+ /*
+ * Restart node attach with new service parameters, and send
+ * ACC
+ */
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_FCP_CMD_RCVD: {
+ /* Most likely a stale frame (received prior to link down);
+ * if we attempt to send a LOGO, it will probably time out
+ * and eat up 20s, so drop the FCP_CMND.
+ */
+ node_printf(node, "FCP_CMND received, drop\n");
+ break;
+ }
+ case EFC_EVT_LOGO_RCVD: {
+ /* I, T, I+T */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+ /* sm: / send LOGO acc */
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_d_device_ready, NULL);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* received an LS_RJT, in this case, send shutdown
+ * (explicit logo) event which will unregister the node,
+ * and start over with PLOGI
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_ADISC,
+ __efc_d_common, __func__))
+ return;
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / post explicit logout */
+ efc_node_post_event(node,
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+ NULL);
+ break;
+
+ case EFC_EVT_LOGO_RCVD: {
+ /* In this case, we have the equivalent of an LS_RJT for
+ * the ADISC, so we need to abort the ADISC, and re-login
+ * with PLOGI
+ */
+ /* sm: / request abort, send LOGO acc */
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ node_printf(node, "%s received attached=%d\n",
+ efc_sm_event_name(evt), node->attached);
+
+ efc_send_logo_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_d_wait_logo_acc_cmpl, NULL);
+ break;
+ }
+ default:
+ __efc_d_common(__func__, ctx, evt, arg);
+ }
+}
diff --git a/drivers/scsi/elx/libefc/efc_device.h b/drivers/scsi/elx/libefc/efc_device.h
new file mode 100644
index 000000000000..3cf1d8c6698f
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_device.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Node state machine functions for remote device node sm
+ */
+
+#ifndef __EFCT_DEVICE_H__
+#define __EFCT_DEVICE_H__
+void
+efc_node_init_device(struct efc_node *node, bool send_plogi);
+void
+efc_process_prli_payload(struct efc_node *node,
+ void *prli);
+void
+efc_d_send_prli_rsp(struct efc_node *node, uint16_t ox_id);
+void
+efc_send_ls_acc_after_attach(struct efc_node *node,
+ struct fc_frame_header *hdr,
+ enum efc_node_send_ls_acc ls);
+void
+__efc_d_wait_loop(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_device_gone(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+#endif /* __EFCT_DEVICE_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_domain.c b/drivers/scsi/elx/libefc/efc_domain.c
new file mode 100644
index 000000000000..ca9d7ff2c0d2
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * domain_sm Domain State Machine: States
+ */
+
+#include "efc.h"
+
+int
+efc_domain_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_domain *domain = NULL;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (event != EFC_HW_DOMAIN_FOUND)
+ domain = data;
+
+ /* Accept domain callback events from the user driver */
+ spin_lock_irqsave(&efc->lock, flags);
+ switch (event) {
+ case EFC_HW_DOMAIN_FOUND: {
+ u64 fcf_wwn = 0;
+ struct efc_domain_record *drec = data;
+
+ /* extract the fcf_wwn */
+ fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn));
+
+ efc_log_debug(efc, "Domain found: wwn %016llX\n", fcf_wwn);
+
+ /* lookup domain, or allocate a new one */
+ domain = efc->domain;
+ if (!domain) {
+ domain = efc_domain_alloc(efc, fcf_wwn);
+ if (!domain) {
+ efc_log_err(efc, "efc_domain_alloc() failed\n");
+ rc = -1;
+ break;
+ }
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ }
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FOUND, drec);
+ break;
+ }
+
+ case EFC_HW_DOMAIN_LOST:
+ domain_trace(domain, "EFC_HW_DOMAIN_LOST:\n");
+ efc->hold_frames = true;
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_LOST, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ALLOC_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_FAIL,
+ NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_ATTACH_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_FAIL:\n");
+ efc_domain_post_event(domain,
+ EFC_EVT_DOMAIN_ATTACH_FAIL, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_OK:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_OK:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_OK, NULL);
+ break;
+
+ case EFC_HW_DOMAIN_FREE_FAIL:
+ domain_trace(domain, "EFC_HW_DOMAIN_FREE_FAIL:\n");
+ efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_FAIL, NULL);
+ break;
+
+ default:
+ efc_log_warn(efc, "unsupported event %#x\n", event);
+ }
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ if (efc->domain && domain->req_accept_frames) {
+ domain->req_accept_frames = false;
+ efc->hold_frames = false;
+ }
+
+ return rc;
+}
+
+static void
+_efc_domain_free(struct kref *arg)
+{
+ struct efc_domain *domain = container_of(arg, struct efc_domain, ref);
+ struct efc *efc = domain->efc;
+
+ if (efc->domain_free_cb)
+ (*efc->domain_free_cb)(efc, efc->domain_free_cb_arg);
+
+ kfree(domain);
+}
+
+void
+efc_domain_free(struct efc_domain *domain)
+{
+ struct efc *efc;
+
+ efc = domain->efc;
+
+ /* Hold frames to clear the domain pointer from the xport lookup */
+ efc->hold_frames = false;
+
+ efc_log_debug(efc, "Domain free: wwn %016llX\n", domain->fcf_wwn);
+
+ xa_destroy(&domain->lookup);
+ efc->domain = NULL;
+ kref_put(&domain->ref, domain->release);
+}
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn)
+{
+ struct efc_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_ATOMIC);
+ if (!domain)
+ return NULL;
+
+ domain->efc = efc;
+ domain->drvsm.app = domain;
+
+ /* initialize refcount */
+ kref_init(&domain->ref);
+ domain->release = _efc_domain_free;
+
+ xa_init(&domain->lookup);
+
+ INIT_LIST_HEAD(&domain->nport_list);
+ efc->domain = domain;
+ domain->fcf_wwn = fcf_wwn;
+ efc_log_debug(efc, "Domain allocated: wwn %016llX\n", domain->fcf_wwn);
+
+ return domain;
+}
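
The domain's lifetime above follows the standard kref idiom: kref_init() at allocation, a release function recovered via container_of(), and kref_put() when the last reference is dropped (see efc_domain_free()). A minimal sketch of the same idiom in isolation, using a hypothetical struct foo rather than the driver's types:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref ref;
        int payload;
};

/* Release callback, invoked when the last reference is dropped. */
static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, ref);

        kfree(f);
}

static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return NULL;
        kref_init(&f->ref);             /* refcount starts at 1 */
        return f;
}

static void foo_put(struct foo *f)
{
        kref_put(&f->ref, foo_release); /* frees f when the count hits 0 */
}

The frame dispatch path later in this file uses the same family of helpers, taking a reference with kref_get_unless_zero() only if the nport is still live.
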
+
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg)
+{
+ /* Register a callback to be called when the domain is freed */
+ efc->domain_free_cb = callback;
+ efc->domain_free_cb_arg = arg;
+ if (!efc->domain && callback)
+ (*callback)(efc, arg);
+}
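
efc_register_domain_free_cb() invokes the callback immediately when no domain exists, so a caller can use it to block until teardown completes. A hedged sketch of one such caller, built on a struct completion and assuming the libefc headers are in scope; the teardown_ctx/wait_for_domain_free names are illustrative, not part of the driver:

#include <linux/completion.h>

struct teardown_ctx {
        struct completion done;
};

static void domain_freed_cb(struct efc *efc, void *arg)
{
        struct teardown_ctx *ctx = arg;

        complete(&ctx->done);
}

static void wait_for_domain_free(struct efc *efc)
{
        struct teardown_ctx ctx;

        init_completion(&ctx.done);
        /* fires immediately if no domain currently exists */
        efc_register_domain_free_cb(efc, domain_freed_cb, &ctx);
        wait_for_completion(&ctx.done);
        /* unhook the callback once we are done with it */
        efc_register_domain_free_cb(efc, NULL, NULL);
}
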
+
+static void
+__efc_domain_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /*
+ * this can arise if an FLOGI fails on the NPORT,
+ * and the NPORT is shutdown
+ */
+ break;
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
+static void
+__efc_domain_common_shutdown(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_domain *domain = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ break;
+ case EFC_EVT_DOMAIN_FOUND:
+ /* save drec, mark domain_found_pending */
+ memcpy(&domain->pending_drec, arg,
+ sizeof(domain->pending_drec));
+ domain->domain_found_pending = true;
+ break;
+ case EFC_EVT_DOMAIN_LOST:
+ /* unmark domain_found_pending */
+ domain->domain_found_pending = false;
+ break;
+
+ default:
+ efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
+ funcname, efc_sm_event_name(evt));
+ }
+}
+
+#define std_domain_state_decl(...)\
+ struct efc_domain *domain = NULL;\
+ struct efc *efc = NULL;\
+ \
+ WARN_ON(!ctx || !ctx->app);\
+ domain = ctx->app;\
+ WARN_ON(!domain->efc);\
+ efc = domain->efc
+
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ domain->attached = false;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND: {
+ u32 i;
+ struct efc_domain_record *drec = arg;
+ struct efc_nport *nport;
+
+ u64 my_wwnn = efc->req_wwnn;
+ u64 my_wwpn = efc->req_wwpn;
+ __be64 bewwpn;
+
+ if (my_wwpn == 0 || my_wwnn == 0) {
+ efc_log_debug(efc, "using default hardware WWN config\n");
+ my_wwpn = efc->def_wwpn;
+ my_wwnn = efc->def_wwnn;
+ }
+
+ efc_log_debug(efc, "Create nport WWPN %016llX WWNN %016llX\n",
+ my_wwpn, my_wwnn);
+
+ /* Allocate a nport and transition to __efc_nport_allocated */
+ nport = efc_nport_alloc(domain, my_wwpn, my_wwnn, U32_MAX,
+ efc->enable_ini, efc->enable_tgt);
+
+ if (!nport) {
+ efc_log_err(efc, "efc_nport_alloc() failed\n");
+ break;
+ }
+ efc_sm_transition(&nport->sm, __efc_nport_allocated, NULL);
+
+ bewwpn = cpu_to_be64(nport->wwpn);
+
+ /* allocate struct efc_nport object for local port
+ * Note: drec->fc_id is ALPA from read_topology only if loop
+ */
+ if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ efc_nport_free(nport);
+ break;
+ }
+
+ domain->is_loop = drec->is_loop;
+
+ /*
+ * If the loop position map includes ALPA == 0,
+ * then we are in a public loop (NL_PORT)
+ * Note that the first element of the loopmap[]
+ * contains the count of elements, and if
+ * ALPA == 0 is present, it will occupy the first
+ * location after the count.
+ */
+ domain->is_nlport = drec->map.loop[1] == 0x00;
+
+ if (!domain->is_loop) {
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+
+ efc_log_debug(efc, "%s fc_id=%#x speed=%d\n",
+ drec->is_loop ?
+ (domain->is_nlport ?
+ "public-loop" : "loop") : "other",
+ drec->fc_id, drec->speed);
+
+ nport->fc_id = drec->fc_id;
+ nport->topology = EFC_NPORT_TOPO_FC_AL;
+ snprintf(nport->display_name, sizeof(nport->display_name),
+ "s%06x", drec->fc_id);
+
+ if (efc->enable_ini) {
+ u32 count = drec->map.loop[0];
+
+ efc_log_debug(efc, "%d position map entries\n",
+ count);
+ for (i = 1; i <= count; i++) {
+ if (drec->map.loop[i] != drec->fc_id) {
+ struct efc_node *node;
+
+ efc_log_debug(efc, "%#x -> %#x\n",
+ drec->fc_id,
+ drec->map.loop[i]);
+ node = efc_node_alloc(nport,
+ drec->map.loop[i],
+ false, true);
+ if (!node) {
+ efc_log_err(efc,
+ "efc_node_alloc() failed\n");
+ break;
+ }
+ efc_node_transition(node,
+ __efc_d_wait_loop,
+ NULL);
+ }
+ }
+ }
+
+ /* Initiate HW domain alloc */
+ if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
+ efc_log_err(efc,
+ "Failed to initiate HW domain allocation\n");
+ break;
+ }
+ efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
+ break;
+ }
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+ struct efc_nport *nport;
+
+ nport = domain->nport;
+ if (WARN_ON(!nport))
+ return;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ /* Save the domain service parameters */
+ memcpy(domain->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
+ memcpy(nport->service_params + 4, domain->dma.virt,
+ sizeof(struct fc_els_flogi) - 4);
+
+ /*
+ * Update the nport's service parameters,
+ * user might have specified non-default names
+ */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * Take the loop topology path,
+ * unless we are an NL_PORT (public loop)
+ */
+ if (domain->is_loop && !domain->is_nlport) {
+ /*
+ * For loop, we already have our FC ID and don't need a
+ * fabric login. Transition to the allocated state and post
+ * an event to attach to the domain. Note that this breaks
+ * the normal action/transition pattern here to avoid a race
+ * with the domain attach callback.
+ */
+ /* sm: is_loop / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ __efc_domain_attach_internal(domain, nport->fc_id);
+ break;
+ }
+ {
+ struct efc_node *node;
+
+ /* alloc fabric node, send FLOGI */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (node) {
+ efc_log_err(efc,
+ "Fabric Controller node already exists\n");
+ break;
+ }
+ node = efc_node_alloc(nport, FC_FID_FLOGI,
+ false, false);
+ if (!node) {
+ efc_log_err(efc,
+ "Error: efc_node_alloc() failed\n");
+ } else {
+ efc_node_transition(node,
+ __efc_fabric_init, NULL);
+ }
+ /* Accept frames */
+ domain->req_accept_frames = true;
+ }
+ /* sm: / start fabric logins */
+ efc_sm_transition(ctx, __efc_domain_allocated, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+ efc_log_err(efc, "%s recv'd waiting for DOMAIN_ALLOC_OK;",
+ efc_sm_event_name(evt));
+ efc_log_err(efc, "shutting down domain\n");
+ domain->req_domain_free = true;
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ efc_log_debug(efc,
+ "%s received while waiting for hw_domain_alloc()\n",
+ efc_sm_event_name(evt));
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ int rc = 0;
+ u32 fc_id;
+
+ if (WARN_ON(!arg))
+ return;
+
+ fc_id = *((u32 *)arg);
+ efc_log_debug(efc, "Requesting hw domain attach fc_id x%x\n",
+ fc_id);
+ /* Update nport lookup */
+ rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport,
+ GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
+ return;
+ }
+
+ /* Update display name for the nport */
+ efc_node_fcid_display(fc_id, domain->nport->display_name,
+ sizeof(domain->nport->display_name));
+
+ /* Issue domain attach call */
+ rc = efc_cmd_domain_attach(efc, domain, fc_id);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_attach failed: %d\n",
+ rc);
+ return;
+ }
+ /* sm: / domain_attach */
+ efc_sm_transition(ctx, __efc_domain_wait_attach, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST: {
+ efc_log_debug(efc,
+ "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n",
+ efc_sm_event_name(evt));
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each
+ * nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_node *node = NULL;
+ struct efc_nport *nport, *next_nport;
+ unsigned long index;
+
+ /*
+ * Set domain notify pending state to avoid
+ * duplicate domain event post
+ */
+ domain->domain_notify_pend = true;
+
+ /* Mark as attached */
+ domain->attached = true;
+
+ /* Transition to ready */
+ /* sm: / forward event to all nports and nodes */
+ efc_sm_transition(ctx, __efc_domain_ready, NULL);
+
+ /* We have an FCFI, so we can accept frames */
+ domain->req_accept_frames = true;
+
+ /*
+ * Notify all nodes that the domain attach request has
+ * completed.
+ * Note: the nport will already have been notified of the
+ * nport attach as a result of the HW's port attach.
+ */
+ list_for_each_entry_safe(nport, next_nport,
+ &domain->nport_list, list_entry) {
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ }
+ domain->domain_notify_pend = false;
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_debug(efc,
+ "%s received while waiting for hw attach\n",
+ efc_sm_event_name(evt));
+ break;
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_LOST:
+ /*
+ * Domain lost while waiting for an attach to complete,
+ * go to a state that waits for the domain attach to
+ * complete, then handle domain lost
+ */
+ efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH:
+ /*
+ * In P2P we can get an attach request from
+ * the other FLOGI path, so drop this one
+ */
+ break;
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
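
The fc_id-to-object lookups used above (domain->lookup for nports, nport->lookup for nodes) are XArrays keyed by the 24-bit FC address. A small sketch of that store/load/iterate pattern with a hypothetical value type:

#include <linux/xarray.h>

static DEFINE_XARRAY(addr_lookup);      /* fc_id -> object */

static int lookup_add(u32 fc_id, void *obj)
{
        /* xa_store() returns the old entry or an xa_err()-encoded error */
        return xa_err(xa_store(&addr_lookup, fc_id, obj, GFP_ATOMIC));
}

static void *lookup_find(u32 fc_id)
{
        return xa_load(&addr_lookup, fc_id);
}

static void lookup_clear(void)
{
        unsigned long index;
        void *obj;

        xa_for_each(&addr_lookup, index, obj)
                xa_erase(&addr_lookup, index);
}
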
+
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ /* start any pending vports */
+ if (efc_vport_start(domain)) {
+ efc_log_debug(domain->efc,
+ "efc_vport_start didn't start vports\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_LOST: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to wait state
+ * and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free failed\n");
+ }
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_FOUND:
+ /* Should not happen */
+ efc_log_err(efc, "%s: evt: %d should not happen\n",
+ __func__, evt);
+ break;
+
+ case EFC_EVT_DOMAIN_REQ_ATTACH: {
+ /* can happen during p2p */
+ u32 fc_id;
+
+ fc_id = *((u32 *)arg);
+
+ /* Assume that the domain is attached */
+ WARN_ON(!domain->attached);
+
+ /*
+ * Verify that the requested FC_ID
+ * is the same as the one we're working with
+ */
+ WARN_ON(domain->nport->fc_id != fc_id);
+ break;
+ }
+
+ default:
+ __efc_domain_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /* Wait for nodes to free prior to the domain shutdown */
+ switch (evt) {
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+ int rc;
+
+ /* sm: / efc_hw_domain_free */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL);
+
+ /* Request efc_hw_domain_free and wait for completion */
+ rc = efc_cmd_domain_free(efc, domain);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_domain_free() failed: %d\n",
+ rc);
+ }
+ break;
+ }
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_FREE_OK:
+ /* sm: / domain_free */
+ if (domain->domain_found_pending) {
+ /*
+ * Save the fcf_wwn and drec from this domain, free the
+ * current domain, and allocate a new one with the same
+ * fcf_wwn. (A SLI-4 "re-register VPI" operation could
+ * perhaps be used here instead.)
+ */
+ u64 fcf_wwn = domain->fcf_wwn;
+ struct efc_domain_record drec = domain->pending_drec;
+
+ efc_log_debug(efc, "Reallocating domain\n");
+ domain->req_domain_free = true;
+ domain = efc_domain_alloc(efc, fcf_wwn);
+
+ if (!domain) {
+ efc_log_err(efc,
+ "efc_domain_alloc() failed\n");
+ return;
+ }
+ /*
+ * Got a new domain; at this point there are at least two
+ * domains. Once the req_domain_free flag is processed, the
+ * associated domain will be removed.
+ */
+ efc_sm_transition(&domain->drvsm, __efc_domain_init,
+ NULL);
+ efc_sm_post_event(&domain->drvsm,
+ EFC_EVT_DOMAIN_FOUND, &drec);
+ } else {
+ domain->req_domain_free = true;
+ }
+ break;
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ std_domain_state_decl();
+
+ domain_sm_trace(domain);
+
+ /*
+ * Wait for the domain alloc/attach completion
+ * after receiving a domain lost.
+ */
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ALLOC_OK:
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ if (!list_empty(&domain->nport_list)) {
+ /*
+ * if there are nports, transition to
+ * wait state and send shutdown to each nport
+ */
+ struct efc_nport *nport = NULL, *nport_next = NULL;
+
+ efc_sm_transition(ctx, __efc_domain_wait_nports_free,
+ NULL);
+ list_for_each_entry_safe(nport, nport_next,
+ &domain->nport_list,
+ list_entry) {
+ efc_sm_post_event(&nport->sm,
+ EFC_EVT_SHUTDOWN, NULL);
+ }
+ } else {
+ /* no nports exist, free domain */
+ efc_sm_transition(ctx, __efc_domain_wait_shutdown,
+ NULL);
+ if (efc_cmd_domain_free(efc, domain))
+ efc_log_err(efc, "hw_domain_free() failed\n");
+ }
+ break;
+ }
+ case EFC_EVT_DOMAIN_ALLOC_FAIL:
+ case EFC_EVT_DOMAIN_ATTACH_FAIL:
+ efc_log_err(efc, "[domain] %-20s: failed\n",
+ efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_domain_common_shutdown(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id)
+{
+ memcpy(domain->dma.virt,
+ ((uint8_t *)domain->flogi_service_params) + 4,
+ sizeof(struct fc_els_flogi) - 4);
+ (void)efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_REQ_ATTACH,
+ &s_id);
+}
+
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id)
+{
+ __efc_domain_attach_internal(domain, s_id);
+}
+
+int
+efc_domain_post_event(struct efc_domain *domain,
+ enum efc_sm_event event, void *arg)
+{
+ int rc;
+ bool req_domain_free;
+
+ rc = efc_sm_post_event(&domain->drvsm, event, arg);
+
+ req_domain_free = domain->req_domain_free;
+ domain->req_domain_free = false;
+
+ if (req_domain_free)
+ efc_domain_free(domain);
+
+ return rc;
+}
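
efc_domain_post_event() defers the actual free until after the state machine returns: the handler only latches req_domain_free, and the poster clears the latch before acting on it so the domain is freed at most once. The same shape in miniature, with hypothetical obj_sm_dispatch()/obj_free() helpers:

#include <linux/types.h>

struct obj {
        bool req_free;
        /* ... */
};

int obj_sm_dispatch(struct obj *o, int event);  /* may set o->req_free */
void obj_free(struct obj *o);

static int obj_post_event(struct obj *o, int event)
{
        int rc = obj_sm_dispatch(o, event);
        bool do_free = o->req_free;

        /* clear the latch before freeing so the object is freed only once */
        o->req_free = false;
        if (do_free)
                obj_free(o);

        return rc;
}
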
+
+static void
+efct_domain_process_pending(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* Need to check the hold-frames condition after each frame
+ * is processed, because any given frame could cause a
+ * transition to a state that holds frames.
+ */
+ if (efc->hold_frames)
+ break;
+
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ if (!list_empty(&efc->pend_frames)) {
+ seq = list_first_entry(&efc->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+
+ if (!seq) {
+ processed = efc->pend_frames_processed;
+ efc->pend_frames_processed = 0;
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+ break;
+ }
+ efc->pend_frames_processed++;
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ /* now dispatch frame(s) to dispatch function */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+
+ seq = NULL;
+ }
+
+ if (processed != 0)
+ efc_log_debug(efc, "%u domain frames held and processed\n",
+ processed);
+}
+
+void
+efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = efc->domain;
+
+ /*
+ * If we are holding frames, or the domain is not yet registered,
+ * or there are already frames on the pending list,
+ * then add the new frame to the pending list.
+ */
+ if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) {
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &efc->pend_frames);
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+
+ if (domain) {
+ /* immediately process pending frames */
+ efct_domain_process_pending(domain);
+ }
+ } else {
+ /*
+ * We are not holding frames and the pending list is empty,
+ * so just process the frame. A non-zero return means the
+ * frame was not handled - so clean it up.
+ */
+ if (efc_domain_dispatch_frame(domain, seq))
+ efc->tt.hw_seq_free(efc, seq);
+ }
+}
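
Together, efc_dispatch_frame() and efct_domain_process_pending() form a simple hold queue: producers append under pend_frames_lock, and the drain loop pops one entry at a time, re-checking the hold condition between frames. A condensed sketch of that shape with a hypothetical item type:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
        struct list_head list_entry;
};

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);
static bool hold;

static void enqueue(struct item *it)
{
        unsigned long flags;

        spin_lock_irqsave(&pending_lock, flags);
        list_add_tail(&it->list_entry, &pending);
        spin_unlock_irqrestore(&pending_lock, flags);
}

static void drain(void (*handle)(struct item *))
{
        unsigned long flags;

        for (;;) {
                struct item *it = NULL;

                if (hold)               /* re-check after every item */
                        break;

                spin_lock_irqsave(&pending_lock, flags);
                if (!list_empty(&pending)) {
                        it = list_first_entry(&pending, struct item,
                                              list_entry);
                        list_del(&it->list_entry);
                }
                spin_unlock_irqrestore(&pending_lock, flags);

                if (!it)
                        break;
                handle(it);
        }
}
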
+
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct efc_domain *domain = (struct efc_domain *)arg;
+ struct efc *efc = domain->efc;
+ struct fc_frame_header *hdr;
+ struct efc_node *node = NULL;
+ struct efc_nport *nport = NULL;
+ unsigned long flags = 0;
+ u32 s_id, d_id, rc = EFC_HW_SEQ_FREE;
+
+ if (!seq->header || !seq->header->dma.virt || !seq->payload->dma.virt) {
+ efc_log_err(efc, "Sequence header or payload is null\n");
+ return rc;
+ }
+
+ hdr = seq->header->dma.virt;
+
+ /* extract the s_id and d_id */
+ s_id = ntoh24(hdr->fh_s_id);
+ d_id = ntoh24(hdr->fh_d_id);
+
+ spin_lock_irqsave(&efc->lock, flags);
+
+ nport = efc_nport_find(domain, d_id);
+ if (!nport) {
+ if (hdr->fh_type == FC_TYPE_FCP) {
+ /* Drop frame */
+ efc_log_warn(efc, "FCP frame with invalid d_id x%x\n",
+ d_id);
+ goto out;
+ }
+
+ /* p2p will use this case */
+ nport = domain->nport;
+ if (!nport || !kref_get_unless_zero(&nport->ref)) {
+ efc_log_err(efc, "Physical nport is NULL\n");
+ goto out;
+ }
+ }
+
+ /* Lookup the node given the remote s_id */
+ node = efc_node_find(nport, s_id);
+
+ /* If not found, then create a new node */
+ if (!node) {
+ /*
+ * If this is solicited data or control based on R_CTL and
+ * there is no node context, then we can drop the frame
+ */
+ if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) ||
+ (hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) {
+ efc_log_debug(efc, "sol data/ctrl frame without node\n");
+ goto out_release;
+ }
+
+ node = efc_node_alloc(nport, s_id, false, false);
+ if (!node) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ goto out_release;
+ }
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+
+ if (node->hold_frames || !list_empty(&node->pend_frames)) {
+ /* add frame to node's pending list */
+ spin_lock(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&seq->list_entry);
+ list_add_tail(&seq->list_entry, &node->pend_frames);
+ spin_unlock(&node->pend_frames_lock);
+ rc = EFC_HW_SEQ_HOLD;
+ goto out_release;
+ }
+
+ /* now dispatch frame to the node frame handler */
+ efc_node_dispatch_frame(node, seq);
+
+out_release:
+ kref_put(&nport->ref, nport->release);
+out:
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return rc;
+}
+
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
+{
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ u32 port_id;
+ struct efc_node *node = (struct efc_node *)arg;
+ struct efc *efc = node->efc;
+
+ port_id = ntoh24(hdr->fh_s_id);
+
+ if (WARN_ON(port_id != node->rnode.fc_id))
+ return;
+
+ if ((!(ntoh24(hdr->fh_f_ctl) & FC_FC_END_SEQ)) ||
+ !(ntoh24(hdr->fh_f_ctl) & FC_FC_SEQ_INIT)) {
+ node_printf(node,
+ "Drop frame hdr = %08x %08x %08x %08x %08x %08x\n",
+ cpu_to_be32(((u32 *)hdr)[0]),
+ cpu_to_be32(((u32 *)hdr)[1]),
+ cpu_to_be32(((u32 *)hdr)[2]),
+ cpu_to_be32(((u32 *)hdr)[3]),
+ cpu_to_be32(((u32 *)hdr)[4]),
+ cpu_to_be32(((u32 *)hdr)[5]));
+ return;
+ }
+
+ switch (hdr->fh_r_ctl) {
+ case FC_RCTL_ELS_REQ:
+ case FC_RCTL_ELS_REP:
+ efc_node_recv_els_frame(node, seq);
+ break;
+
+ case FC_RCTL_BA_ABTS:
+ case FC_RCTL_BA_ACC:
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_NOP:
+ efc_log_err(efc, "Received ABTS:\n");
+ break;
+
+ case FC_RCTL_DD_UNSOL_CMD:
+ case FC_RCTL_DD_UNSOL_CTL:
+ switch (hdr->fh_type) {
+ case FC_TYPE_FCP:
+ if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_UNSOL_CMD) {
+ if (!node->fcp_enabled) {
+ efc_node_recv_fcp_cmd(node, seq);
+ break;
+ }
+ efc_log_err(efc, "Recvd FCP CMD. Drop IO\n");
+ } else if ((hdr->fh_r_ctl & 0xf) ==
+ FC_RCTL_DD_SOL_DATA) {
+ node_printf(node,
+ "solicited data recvd. Drop IO\n");
+ }
+ break;
+
+ case FC_TYPE_CT:
+ efc_node_recv_ct_frame(node, seq);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ efc_log_err(efc, "Unhandled frame rctl: %02x\n", hdr->fh_r_ctl);
+ }
+}
diff --git a/drivers/scsi/elx/libefc/efc_domain.h b/drivers/scsi/elx/libefc/efc_domain.h
new file mode 100644
index 000000000000..5468ea7ab19b
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_domain.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declare driver's domain handler exported interface
+ */
+
+#ifndef __EFCT_DOMAIN_H__
+#define __EFCT_DOMAIN_H__
+
+struct efc_domain *
+efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn);
+void
+efc_domain_free(struct efc_domain *domain);
+
+void
+__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg);
+void
+efc_domain_attach(struct efc_domain *domain, u32 s_id);
+int
+efc_domain_post_event(struct efc_domain *domain, enum efc_sm_event event,
+ void *arg);
+void
+__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id);
+
+int
+efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+void
+efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
+
+#endif /* __EFCT_DOMAIN_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
new file mode 100644
index 000000000000..24db0accb256
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Functions to build and send ELS/CT/BLS commands and responses.
+ */
+
+#include "efc.h"
+#include "efc_els.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_LOG_ENABLE_ELS_TRACE(efc) \
+ (((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0)
+
+#define node_els_trace() \
+ do { \
+ if (EFC_LOG_ENABLE_ELS_TRACE(efc)) \
+ efc_log_info(efc, "[%s] %-20s\n", \
+ node->display_name, __func__); \
+ } while (0)
+
+#define els_io_printf(els, fmt, ...) \
+ efc_log_err((struct efc *)els->node->efc,\
+ "[%s] %-8s " fmt, \
+ els->node->display_name,\
+ els->display_name, ##__VA_ARGS__)
+
+#define EFC_ELS_RSP_LEN 1024
+#define EFC_ELS_GID_PT_RSP_LEN 8096
+
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen)
+{
+ return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN);
+}
+
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
+{
+ struct efc *efc;
+ struct efc_els_io_req *els;
+ unsigned long flags = 0;
+
+ efc = node->efc;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+
+ if (!node->els_io_enabled) {
+ efc_log_err(efc, "els io alloc disabled\n");
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return NULL;
+ }
+
+ els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
+ if (!els) {
+ atomic_add_return(1, &efc->els_io_alloc_failed_count);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return NULL;
+ }
+
+ /* initialize refcount */
+ kref_init(&els->ref);
+ els->release = _efc_els_io_free;
+
+ /* populate generic io fields */
+ els->node = node;
+
+ /* now allocate DMA for request and response */
+ els->io.req.size = reqlen;
+ els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
+ &els->io.req.phys, GFP_DMA);
+ if (!els->io.req.virt) {
+ mempool_free(els, efc->els_io_pool);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return NULL;
+ }
+
+ els->io.rsp.size = rsplen;
+ els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
+ &els->io.rsp.phys, GFP_DMA);
+ if (!els->io.rsp.virt) {
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+ mempool_free(els, efc->els_io_pool);
+ els = NULL;
+ }
+
+ if (els) {
+ /* initialize fields */
+ els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES;
+
+ /* add els structure to ELS IO list */
+ INIT_LIST_HEAD(&els->list_entry);
+ list_add_tail(&els->list_entry, &node->els_ios_list);
+ }
+
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return els;
+}
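
Each ELS IO carries a coherent DMA request/response buffer pair, allocated here and released in _efc_els_io_free(). The allocate/free idiom in isolation, as a hypothetical helper (GFP_KERNEL used for simplicity where the driver passes GFP_DMA):

#include <linux/dma-mapping.h>

struct els_buf {
        size_t size;
        void *virt;
        dma_addr_t phys;
};

static int els_buf_alloc(struct device *dev, struct els_buf *b, size_t size)
{
        b->size = size;
        b->virt = dma_alloc_coherent(dev, size, &b->phys, GFP_KERNEL);
        return b->virt ? 0 : -ENOMEM;
}

static void els_buf_free(struct device *dev, struct els_buf *b)
{
        if (b->virt)
                dma_free_coherent(dev, b->size, b->virt, b->phys);
        b->virt = NULL;
}
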
+
+void
+efc_els_io_free(struct efc_els_io_req *els)
+{
+ kref_put(&els->ref, els->release);
+}
+
+void
+_efc_els_io_free(struct kref *arg)
+{
+ struct efc_els_io_req *els =
+ container_of(arg, struct efc_els_io_req, ref);
+ struct efc *efc;
+ struct efc_node *node;
+ int send_empty_event = false;
+ unsigned long flags = 0;
+
+ node = els->node;
+ efc = node->efc;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+
+ list_del(&els->list_entry);
+ /* Send the list-empty event if the IO allocator is disabled
+ * and the list is empty.
+ * If node->els_io_enabled were not checked, the event would
+ * be posted continually.
+ */
+ send_empty_event = (!node->els_io_enabled &&
+ list_empty(&node->els_ios_list));
+
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+
+ /* free ELS request and response buffers */
+ dma_free_coherent(&efc->pci->dev, els->io.rsp.size,
+ els->io.rsp.virt, els->io.rsp.phys);
+ dma_free_coherent(&efc->pci->dev, els->io.req.size,
+ els->io.req.virt, els->io.req.phys);
+
+ mempool_free(els, efc->els_io_pool);
+
+ if (send_empty_event)
+ efc_scsi_io_list_empty(node->efc, node);
+}
+
+static void
+efc_els_retry(struct efc_els_io_req *els);
+
+static void
+efc_els_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_els_io_req *els = from_timer(els, t, delay_timer);
+
+ /* Retry delay timer expired, retry the ELS request */
+ efc_els_retry(els);
+}
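
The retry delay uses the standard timer_list idiom: timer_setup() binds the callback, mod_timer() arms it, and from_timer() recovers the containing object inside the callback (as the LS_RJT busy case in efc_els_req_cb() below does). Condensed, with a hypothetical retry_req structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct retry_req {
        struct timer_list delay_timer;
};

/* placeholder: in the driver this would resend the ELS request */
static void retry_req_resend(struct retry_req *req)
{
}

static void retry_req_delay_cb(struct timer_list *t)
{
        /* recover the containing request from the embedded timer */
        struct retry_req *req = from_timer(req, t, delay_timer);

        retry_req_resend(req);
}

static void retry_req_arm(struct retry_req *req, unsigned int msecs)
{
        timer_setup(&req->delay_timer, retry_req_delay_cb, 0);
        mod_timer(&req->delay_timer, jiffies + msecs_to_jiffies(msecs));
}
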
+
+static int
+efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 reason_code;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ if (status)
+ els_io_printf(els, "status x%x ext x%x\n", status, ext_status);
+
+ /* set the response len element of els->rsp */
+ els->io.rsp.len = length;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+ /* set the response len element of els->rsp */
+ cbdata.rsp_len = length;
+
+ /* FW returns the number of bytes received on the link in
+ * the WCQE, not the amount placed in the buffer; use this info to
+ * check if there was an overrun.
+ */
+ if (length > els->io.rsp.size) {
+ efc_log_warn(efc,
+ "ELS response returned len=%d > buflen=%zu\n",
+ length, els->io.rsp.size);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ return 0;
+ }
+
+ /* Post event to ELS IO object */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata);
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ reason_code = (ext_status >> 16) & 0xff;
+
+ /* delay and retry if reason code is Logical Busy */
+ switch (reason_code) {
+ case ELS_RJT_BUSY:
+ els->node->els_req_cnt--;
+ els_io_printf(els,
+ "LS_RJT Logical Busy, delay and retry\n");
+ timer_setup(&els->delay_timer,
+ efc_els_delay_timer_cb, 0);
+ mod_timer(&els->delay_timer,
+ jiffies + msecs_to_jiffies(5000));
+ break;
+ default:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT,
+ &cbdata);
+ break;
+ }
+ break;
+
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ switch (ext_status) {
+ case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
+ efc_els_retry(els);
+ break;
+ default:
+ efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n",
+ ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL,
+ &cbdata);
+ break;
+ }
+ break;
+ default: /* Other error */
+ efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n",
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status)
+{
+ struct efc_els_io_req *els =
+ container_of(io, struct efc_els_io_req, io);
+
+ WARN_ON_ONCE(!els->cb);
+
+ ((efc_hw_srrs_cb_t)els->cb) (els, len, status, ext_status);
+}
+
+static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els,
+ enum efc_disc_io_type io_type)
+{
+ int rc = 0;
+ struct efc *efc = node->efc;
+ struct efc_node_cb cbdata;
+
+ /* update ELS request counter */
+ els->node->els_req_cnt++;
+
+ /* Prepare the IO request details */
+ els->io.io_type = io_type;
+ els->io.xmit_len = els->io.req.size;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ els->io.s_id = node->nport->fc_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->rnode.attached)
+ els->io.rpi_registered = true;
+
+ els->cb = efc_els_req_cb;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_log_err(efc, "efc_els_send failed: %d\n", rc);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+
+ return rc;
+}
+
+static void
+efc_els_retry(struct efc_els_io_req *els)
+{
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+ u32 rc;
+
+ efc = els->node->efc;
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+
+ if (els->els_retries_remaining) {
+ els->els_retries_remaining--;
+ rc = efc->tt.send_els(efc, &els->io);
+ } else {
+ rc = -EIO;
+ }
+
+ if (rc) {
+ efc_log_err(efc, "ELS retries exhausted\n");
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
+ }
+}
+
+static int
+efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els;
+ struct efc_node *node;
+ struct efc *efc;
+ struct efc_node_cb cbdata;
+
+ els = arg;
+ node = els->node;
+ efc = node->efc;
+
+ cbdata.status = status;
+ cbdata.ext_status = ext_status;
+ cbdata.header = NULL;
+ cbdata.els_rsp = els->io.rsp;
+
+ /* Post node event */
+ switch (status) {
+ case SLI4_FC_WCQE_STATUS_SUCCESS:
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata);
+ break;
+
+ default: /* Other error */
+ efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n",
+ node->display_name, els->display_name,
+ status, ext_status);
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen)
+{
+ int rc = 0;
+ struct efc_node_cb cbdata;
+ struct efc_node *node = els->node;
+ struct efc *efc = node->efc;
+
+ /* increment ELS completion counter */
+ node->els_cmpl_cnt++;
+
+ els->io.io_type = EFC_DISC_IO_ELS_RESP;
+ els->cb = efc_els_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.xmit_len = rsplen;
+ els->io.rsp_len = els->io.rsp.size;
+ els->io.rpi = node->rnode.indicator;
+ els->io.vpi = node->nport->indicator;
+ if (node->nport->fc_id != U32_MAX)
+ els->io.s_id = node->nport->fc_id;
+ else
+ els->io.s_id = els->io.iparam.els.s_id;
+ els->io.d_id = node->rnode.fc_id;
+
+ if (node->attached)
+ els->io.rpi_registered = true;
+
+ rc = efc->tt.send_els(efc, &els->io);
+ if (!rc)
+ return rc;
+
+ cbdata.status = EFC_STATUS_INVALID;
+ cbdata.ext_status = EFC_STATUS_INVALID;
+ cbdata.els_rsp = els->io.rsp;
+ efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
+
+ return rc;
+}
+
+int
+efc_send_plogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_flogi *plogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+ els->display_name = "plogi";
+
+ /* Build PLOGI request */
+ plogi = els->io.req.virt;
+
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+
+ plogi->fl_cmd = ELS_PLOGI;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_flogi(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *flogi;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi";
+
+ /* Build FLOGI request */
+ flogi = els->io.req.virt;
+
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_FLOGI;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_fdisc(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc;
+ struct fc_els_flogi *fdisc;
+
+ efc = node->efc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*fdisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "fdisc";
+
+ /* Build FDISC request */
+ fdisc = els->io.req.virt;
+
+ memcpy(fdisc, node->nport->service_params, sizeof(*fdisc));
+ fdisc->fl_cmd = ELS_FDISC;
+ memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_prli(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli";
+
+ /* Build PRLI request */
+ pp = els->io.req.virt;
+
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = 16;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_logo(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_logo *logo;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo";
+
+ /* Build LOGO request */
+
+ logo = els->io.req.virt;
+
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, node->rnode.nport->fc_id);
+ logo->fl_n_port_wwn = sparams->fl_wwpn;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_adisc(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+ struct efc_nport *nport = node->nport;
+
+ node_els_trace();
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc";
+
+ /* Build ADISC request */
+
+ adisc = els->io.req.virt;
+
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_ADISC;
+ hton24(adisc->adisc_hard_addr, nport->fc_id);
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_scr(struct efc_node *node)
+{
+ struct efc_els_io_req *els;
+ struct efc *efc = node->efc;
+ struct fc_els_scr *req;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*req));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "scr";
+
+ req = els->io.req.virt;
+
+ memset(req, 0, sizeof(*req));
+ req->scr_cmd = ELS_SCR;
+ req->scr_reg_func = ELS_SCRF_FULL;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
+}
+
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_rjt *rjt;
+
+ els = efc_els_io_alloc(node, sizeof(*rjt));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ node_els_trace();
+
+ els->display_name = "ls_rjt";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ rjt = els->io.req.virt;
+ memset(rjt, 0, sizeof(*rjt));
+
+ rjt->er_cmd = ELS_LS_RJT;
+ rjt->er_reason = reason_code;
+ rjt->er_explan = reason_code_expl;
+
+ return efc_els_send_rsp(els, sizeof(*rjt));
+}
+
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *plogi;
+ struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*plogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "plogi_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ plogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(plogi, node->nport->service_params, sizeof(*plogi));
+ plogi->fl_cmd = ELS_LS_ACC;
+ memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));
+
+	/* Set the broadcast support bit if the remote port requested it */
+ if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST))
+ plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST);
+
+ return efc_els_send_rsp(els, sizeof(*plogi));
+}
+
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_flogi *flogi;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*flogi));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "flogi_p2p_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+ els->io.iparam.els.s_id = s_id;
+
+ flogi = els->io.req.virt;
+
+ /* copy our port's service parameters to payload */
+ memcpy(flogi, node->nport->service_params, sizeof(*flogi));
+ flogi->fl_cmd = ELS_LS_ACC;
+ memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));
+
+ memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp));
+
+ return efc_els_send_rsp(els, sizeof(*flogi));
+}
+
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prli_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = 0x10;
+ pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK;
+
+ pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
+ (node->nport->enable_ini ?
+ FCP_SPPF_INIT_FCN : 0) |
+ (node->nport->enable_tgt ?
+ FCP_SPPF_TARG_FCN : 0));
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct {
+ struct fc_els_prlo prlo;
+ struct fc_els_spp spp;
+ } *pp;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*pp));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "prlo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ pp = els->io.req.virt;
+ memset(pp, 0, sizeof(*pp));
+ pp->prlo.prlo_cmd = ELS_LS_ACC;
+ pp->prlo.prlo_obs = 0x10;
+ pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp));
+
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_type_ext = 0;
+ pp->spp.spp_flags = FC_SPP_RESP_ACK;
+
+ return efc_els_send_rsp(els, sizeof(*pp));
+}
+
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_ls_acc *acc;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*acc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "ls_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ acc = els->io.req.virt;
+ memset(acc, 0, sizeof(*acc));
+
+ acc->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*acc));
+}
+
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct fc_els_ls_acc *logo;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*logo));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "logo_acc";
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ logo = els->io.req.virt;
+ memset(logo, 0, sizeof(*logo));
+
+ logo->la_cmd = ELS_LS_ACC;
+
+ return efc_els_send_rsp(els, sizeof(*logo));
+}
+
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els = NULL;
+ struct fc_els_adisc *adisc;
+ struct fc_els_flogi *sparams;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*adisc));
+ if (!els) {
+ efc_log_err(efc, "els IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->display_name = "adisc_acc";
+
+ /* Go ahead and send the ELS_ACC */
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+ els->io.iparam.els.ox_id = ox_id;
+
+ sparams = (struct fc_els_flogi *)node->nport->service_params;
+ adisc = els->io.req.virt;
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_LS_ACC;
+ adisc->adisc_wwpn = sparams->fl_wwpn;
+ adisc->adisc_wwnn = sparams->fl_wwnn;
+ hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);
+
+ return efc_els_send_rsp(els, sizeof(*adisc));
+}
+
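+/*
+ * Fill in a basic FC-CT IU preamble for a directory/name-server request.
+ * The maximum/residual size field is expressed in 32-bit words.
+ */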
+static inline void
+fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size)
+{
+ hdr->ct_rev = FC_CT_REV;
+ hdr->ct_fs_type = FC_FST_DIR;
+ hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ hdr->ct_options = 0;
+ hdr->ct_cmd = cpu_to_be16(cmd);
+ /* words */
+ hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32)));
+ hdr->ct_reason = 0;
+ hdr->ct_explan = 0;
+ hdr->ct_vendor = 0;
+}
+
+int
+efc_ns_send_rftid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rft_id rftid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rftid";
+
+ ct = els->io.req.virt;
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID,
+ sizeof(struct fc_ns_rft_id));
+
+ hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id);
+ ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] =
+ cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW));
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_rffid(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_els_io_req *els;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_rff_id rffid;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc(node, sizeof(*ct));
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "rffid";
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID,
+ sizeof(struct fc_ns_rff_id));
+
+ hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id);
+ if (node->nport->enable_ini)
+ ct->rffid.fr_feat |= FCP_FEAT_INIT;
+ if (node->nport->enable_tgt)
+ ct->rffid.fr_feat |= FCP_FEAT_TARG;
+ ct->rffid.fr_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+int
+efc_ns_send_gidpt(struct efc_node *node)
+{
+ struct efc_els_io_req *els = NULL;
+ struct efc *efc = node->efc;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_ns_gid_pt gidpt;
+ } *ct;
+
+ node_els_trace();
+
+ els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;
+
+ els->display_name = "gidpt";
+
+ ct = els->io.req.virt;
+
+ memset(ct, 0, sizeof(*ct));
+ fcct_build_req_header(&ct->hdr, FC_NS_GID_PT,
+ sizeof(struct fc_ns_gid_pt));
+
+ ct->gidpt.fn_pt_type = FC_TYPE_FCP;
+
+ return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
+}
+
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg)
+{
+	/* don't want any further events (e.g. abort requests from the node
+	 * state machine); mark the request free so no more are processed
+	 */
+ els->els_req_free = true;
+ efc_node_post_els_resp(els->node, evt, arg);
+
+ efc_els_io_free(els);
+}
+
+static int
+efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status)
+{
+ struct efc_els_io_req *els = arg;
+
+ efc_els_io_free(els);
+
+ return 0;
+}
+
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code,
+ u32 reason_code, u32 reason_code_explanation)
+{
+ struct efc_els_io_req *els = NULL;
+ struct fc_ct_hdr *rsp = NULL;
+
+ els = efc_els_io_alloc(node, 256);
+ if (!els) {
+ efc_log_err(efc, "IO alloc failed\n");
+ return -EIO;
+ }
+
+ rsp = els->io.rsp.virt;
+
+ *rsp = *ct_hdr;
+
+ fcct_build_req_header(rsp, cmd_rsp_code, 0);
+ rsp->ct_reason = reason_code;
+ rsp->ct_explan = reason_code_explanation;
+
+ els->display_name = "ct_rsp";
+ els->cb = efc_ct_acc_cb;
+
+ /* Prepare the IO request details */
+ els->io.io_type = EFC_DISC_IO_CT_RESP;
+ els->io.xmit_len = sizeof(*rsp);
+
+ els->io.rpi = node->rnode.indicator;
+ els->io.d_id = node->rnode.fc_id;
+
+ memset(&els->io.iparam, 0, sizeof(els->io.iparam));
+
+ els->io.iparam.ct.ox_id = ox_id;
+	els->io.iparam.ct.r_ctl = FC_RCTL_DD_SOL_CTL;
+ els->io.iparam.ct.type = FC_TYPE_CT;
+ els->io.iparam.ct.df_ctl = 0;
+ els->io.iparam.ct.timeout = 5;
+
+ if (efc->tt.send_els(efc, &els->io)) {
+ efc_els_io_free(els);
+ return -EIO;
+ }
+ return 0;
+}
+
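+/*
+ * Send a BA_ACC in response to a received ABTS: echo the OX_ID/RX_ID from
+ * the ABTS frame header and report the full sequence count range.
+ */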
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr)
+{
+ struct sli_bls_params bls;
+ struct fc_ba_acc *acc;
+ struct efc *efc = node->efc;
+
+ memset(&bls, 0, sizeof(bls));
+ bls.ox_id = be16_to_cpu(hdr->fh_ox_id);
+ bls.rx_id = be16_to_cpu(hdr->fh_rx_id);
+ bls.s_id = ntoh24(hdr->fh_d_id);
+ bls.d_id = node->rnode.fc_id;
+ bls.rpi = node->rnode.indicator;
+ bls.vpi = node->nport->indicator;
+
+ acc = (void *)bls.payload;
+ acc->ba_ox_id = cpu_to_be16(bls.ox_id);
+ acc->ba_rx_id = cpu_to_be16(bls.rx_id);
+ acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);
+
+ return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls);
+}
diff --git a/drivers/scsi/elx/libefc/efc_els.h b/drivers/scsi/elx/libefc/efc_els.h
new file mode 100644
index 000000000000..3c4f820f602e
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_els.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFC_ELS_H__
+#define __EFC_ELS_H__
+
+#define EFC_STATUS_INVALID INT_MAX
+#define EFC_ELS_IO_POOL_SZ 1024
+
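+/*
+ * Context for a single outstanding ELS or CT exchange issued on behalf of
+ * an efc_node; released via the embedded kref once the exchange completes.
+ */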
+struct efc_els_io_req {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc_node *node;
+ void *cb;
+ u32 els_retries_remaining;
+ bool els_req_free;
+ struct timer_list delay_timer;
+
+ const char *display_name;
+
+ struct efc_disc_io io;
+};
+
+typedef int(*efc_hw_srrs_cb_t)(void *arg, u32 length, int status,
+ u32 ext_status);
+
+void _efc_els_io_free(struct kref *arg);
+struct efc_els_io_req *
+efc_els_io_alloc(struct efc_node *node, u32 reqlen);
+struct efc_els_io_req *
+efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen);
+void efc_els_io_free(struct efc_els_io_req *els);
+
+/* ELS command send */
+typedef void (*els_cb_t)(struct efc_node *node,
+ struct efc_node_cb *cbdata, void *arg);
+int
+efc_send_plogi(struct efc_node *node);
+int
+efc_send_flogi(struct efc_node *node);
+int
+efc_send_fdisc(struct efc_node *node);
+int
+efc_send_prli(struct efc_node *node);
+int
+efc_send_prlo(struct efc_node *node);
+int
+efc_send_logo(struct efc_node *node);
+int
+efc_send_adisc(struct efc_node *node);
+int
+efc_send_pdisc(struct efc_node *node);
+int
+efc_send_scr(struct efc_node *node);
+int
+efc_ns_send_rftid(struct efc_node *node);
+int
+efc_ns_send_rffid(struct efc_node *node);
+int
+efc_ns_send_gidpt(struct efc_node *node);
+void
+efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg);
+
+/* ELS acc send */
+int
+efc_send_ls_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
+ u32 reason_code_expl, u32 vendor_unique);
+int
+efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id);
+int
+efc_send_flogi_acc(struct efc_node *node, u32 ox_id, u32 is_fport);
+int
+efc_send_plogi_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prli_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_logo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_prlo_acc(struct efc_node *node, u32 ox_id);
+int
+efc_send_adisc_acc(struct efc_node *node, u32 ox_id);
+
+int
+efc_bls_send_acc_hdr(struct efc *efc, struct efc_node *node,
+ struct fc_frame_header *hdr);
+int
+efc_bls_send_rjt_hdr(struct efc_els_io_req *io, struct fc_frame_header *hdr);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+/* CT */
+int
+efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
+ struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, u32 reason_code,
+ u32 reason_code_explanation);
+
+int
+efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr);
+
+#endif /* __EFC_ELS_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
new file mode 100644
index 000000000000..d397220d9e54
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -0,0 +1,1564 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * This file implements remote node state machines for:
+ * - Fabric logins.
+ * - Fabric controller events.
+ * - Name/directory services interaction.
+ * - Point-to-point logins.
+ */
+
+/*
+ * fabric_sm Node State Machine: Fabric States
+ * ns_sm Node State Machine: Name/Directory Services States
+ * p2p_sm Node State Machine: Point-to-Point Node States
+ */
+
+#include "efc.h"
+
+static void
+efc_fabric_initiate_shutdown(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+
+ node->els_io_enabled = false;
+
+ if (node->attached) {
+ int rc;
+
+ /* issue hw node free; don't care if succeeds right away
+ * or sometime later, will check node->attached later in
+ * shutdown process
+ */
+ rc = efc_cmd_node_detach(efc, &node->rnode);
+ if (rc < 0) {
+ node_printf(node, "Failed freeing HW node, rc=%d\n",
+ rc);
+ }
+ }
+ /*
+ * node has either been detached or is in the process of being detached,
+ * call common node's initiate cleanup function
+ */
+ efc_node_initiate_cleanup(node);
+}
+
+static void
+__efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+
+ node = ctx->app;
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ case EFC_EVT_SHUTDOWN:
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ /* call default event handler common to all nodes */
+ __efc_node_common(funcname, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_REENTER:
+ efc_log_debug(efc, ">>> reenter !!\n");
+ fallthrough;
+
+ case EFC_EVT_ENTER:
+ /* send FLOGI */
+ efc_send_flogi(node);
+ efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology)
+{
+ node->nport->topology = topology;
+}
+
+void
+efc_fabric_notify_topology(struct efc_node *node)
+{
+ struct efc_node *tmp_node;
+ enum efc_nport_topology topology = node->nport->topology;
+ unsigned long index;
+
+ /*
+ * now loop through the nodes in the nport
+ * and send topology notification
+ */
+ xa_for_each(&node->nport->lookup, index, tmp_node) {
+ if (tmp_node != node) {
+ efc_node_post_event(tmp_node,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ (void *)topology);
+ }
+ }
+}
+
+static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
+{
+ return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
+}
+
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+
+ memcpy(node->nport->domain->flogi_service_params,
+ cbdata->els_rsp.virt,
+ sizeof(struct fc_els_flogi));
+
+		/* Check to see if the fabric is an F_PORT or an N_PORT */
+ if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
+ /* sm: if not nport / efc_domain_attach */
+ /* ext_status has the fc_id, attach domain */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
+ efc_fabric_notify_topology(node);
+ WARN_ON(node->nport->domain->attached);
+ efc_domain_attach(node->nport->domain,
+ cbdata->ext_status);
+ efc_node_transition(node,
+ __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ /* sm: if nport and p2p_winner / efc_domain_attach */
+ efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
+ if (efc_p2p_setup(node->nport)) {
+ node_printf(node,
+ "p2p setup failed, shutting down node\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (node->nport->domain->attached &&
+ !node->nport->domain->domain_notify_pend) {
+ /*
+ * already attached,
+ * just send ATTACH_OK
+ */
+ node_printf(node,
+ "p2p winner, domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /*
+ * peer is p2p winner;
+ * PLOGI will be received on the
+ * remote SID=1 node;
+ * this node has served its purpose
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+
+ break;
+ }
+
+ case EFC_EVT_ELS_REQ_ABORTED:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ struct efc_nport *nport = node->nport;
+ /*
+ * with these errors, we have no recovery,
+ * so shutdown the nport, leave the link
+ * up and the domain ready
+ */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node,
+ "FLOGI failed evt=%s, shutting down nport [%s]\n",
+ efc_sm_event_name(evt), nport->display_name);
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send FDISC */
+ efc_send_fdisc(node);
+ efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ /* fc_id is in ext_status */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / efc_nport_attach */
+ efc_nport_attach(node->nport, cbdata->ext_status);
+ efc_node_transition(node, __efc_fabric_wait_domain_attach,
+ NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
+ /* sm: / shutdown nport */
+ efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_start_ns_node(struct efc_nport *nport)
+{
+ struct efc_node *ns;
+
+ /* Instantiate a name services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (!ns) {
+ ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
+ if (!ns)
+ return -EIO;
+ }
+	/*
+	 * If the node already existed, transitioning it from here breaks the
+	 * convention that transitions happen only (1) from within the state
+	 * machine or (2) immediately after allocation.
+	 */
+ if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
+ efc_node_pause(ns, __efc_ns_init);
+ else
+ efc_node_transition(ns, __efc_ns_init, NULL);
+ return 0;
+}
+
+static int
+efc_start_fabctl_node(struct efc_nport *nport)
+{
+ struct efc_node *fabctl;
+
+ fabctl = efc_node_find(nport, FC_FID_FCTRL);
+ if (!fabctl) {
+ fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
+ false, false);
+ if (!fabctl)
+ return -EIO;
+ }
+	/*
+	 * If the fabctl node already existed, transitioning it from here
+	 * breaks the convention that transitions happen only (1) from within
+	 * the state machine or (2) immediately after allocation.
+	 */
+ efc_node_transition(fabctl, __efc_fabctl_init, NULL);
+ return 0;
+}
+
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ int rc;
+
+ rc = efc_start_ns_node(node->nport);
+ if (rc)
+ return;
+
+ /* sm: if enable_ini / start fabctl node */
+ /* Instantiate the fabric controller (sends SCR) */
+ if (node->nport->enable_rscn) {
+ rc = efc_start_fabctl_node(node->nport);
+ if (rc)
+ return;
+ }
+ efc_node_transition(node, __efc_fabric_idle, NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ /* Save service parameters */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ /* sm: / send RFTID */
+ efc_ns_send_rftid(node);
+ efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ /* wait for any of these attach events and then shutdown */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ node_printf(node, "Attach evt=%s, proceed to shutdown\n",
+ efc_sm_event_name(evt));
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ /* ignore shutdown event as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "Shutdown event received\n");
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / send RFFID */
+ efc_ns_send_rffid(node);
+ efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
+ break;
+
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Waits for an RFFID response event;
+ * if rscn enabled, a GIDPT name services request is issued.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ if (node->nport->enable_rscn) {
+ /* sm: if enable_rscn / send GIDPT */
+ efc_ns_send_gidpt(node);
+
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ } else {
+ /* if 'T' only, we're done, go to idle */
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ }
+ break;
+ }
+ /*
+ * if receive RSCN just ignore,
+ * we haven't sent GID_PT yet (ACC sent by fabctl node)
+ */
+ case EFC_EVT_RSCN_RCVD:
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
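+/*
+ * Walk the GID_PT accept payload: port IDs still present on the fabric are
+ * cleared from the active-node list (any nodes left over are reported as
+ * missing), and new port IDs are instantiated as remote nodes when this
+ * nport is an initiator.
+ */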
+static int
+efc_process_gidpt_payload(struct efc_node *node,
+ void *data, u32 gidpt_len)
+{
+ u32 i, j;
+ struct efc_node *newnode;
+ struct efc_nport *nport = node->nport;
+ struct efc *efc = node->efc;
+ u32 port_id = 0, port_count, plist_count;
+ struct efc_node *n;
+ struct efc_node **active_nodes;
+ int residual;
+ struct {
+ struct fc_ct_hdr hdr;
+ struct fc_gid_pn_resp pn_rsp;
+ } *rsp;
+ struct fc_gid_pn_resp *gidpt;
+ unsigned long index;
+
+ rsp = data;
+ gidpt = &rsp->pn_rsp;
+ residual = be16_to_cpu(rsp->hdr.ct_mr_size);
+
+ if (residual != 0)
+ efc_log_debug(node->efc, "residual is %u words\n", residual);
+
+ if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
+ node_printf(node,
+ "GIDPT request failed: rsn x%x rsn_expl x%x\n",
+ rsp->hdr.ct_reason, rsp->hdr.ct_explan);
+ return -EIO;
+ }
+
+ plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);
+
+ /* Count the number of nodes */
+ port_count = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_count++;
+ }
+
+ /* Allocate a buffer for all nodes */
+	active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
+	if (!active_nodes) {
+		node_printf(node, "active_nodes allocation failed\n");
+ return -EIO;
+ }
+
+ /* Fill buffer with fc_id of active nodes */
+ i = 0;
+ xa_for_each(&nport->lookup, index, n) {
+ port_id = n->rnode.fc_id;
+ switch (port_id) {
+ case FC_FID_FLOGI:
+ case FC_FID_FCTRL:
+ case FC_FID_DIR_SERV:
+ break;
+ default:
+ if (port_id != FC_FID_DOM_MGR)
+ active_nodes[i++] = n;
+ break;
+ }
+ }
+
+ /* update the active nodes buffer */
+ for (i = 0; i < plist_count; i++) {
+		port_id = ntoh24(gidpt[i].fp_fid);
+
+ for (j = 0; j < port_count; j++) {
+ if (active_nodes[j] &&
+ port_id == active_nodes[j]->rnode.fc_id) {
+ active_nodes[j] = NULL;
+ }
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+
+ /* Those remaining in the active_nodes[] are now gone ! */
+ for (i = 0; i < port_count; i++) {
+ /*
+ * if we're an initiator and the remote node
+ * is a target, then post the node missing event.
+ * if we're target and we have enabled
+ * target RSCN, then post the node missing event.
+ */
+ if (!active_nodes[i])
+ continue;
+
+ if ((node->nport->enable_ini && active_nodes[i]->targ) ||
+ (node->nport->enable_tgt && enable_target_rscn(efc))) {
+ efc_node_post_event(active_nodes[i],
+ EFC_EVT_NODE_MISSING, NULL);
+ } else {
+ node_printf(node,
+ "GID_PT: skipping non-tgt port_id x%06x\n",
+ active_nodes[i]->rnode.fc_id);
+ }
+ }
+ kfree(active_nodes);
+
+ for (i = 0; i < plist_count; i++) {
+		port_id = ntoh24(gidpt[i].fp_fid);
+
+ /* Don't create node for ourselves */
+ if (port_id == node->rnode.nport->fc_id) {
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ continue;
+ }
+
+ newnode = efc_node_find(nport, port_id);
+ if (!newnode) {
+ if (!node->nport->enable_ini)
+ continue;
+
+ newnode = efc_node_alloc(nport, port_id, false, false);
+ if (!newnode) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return -EIO;
+ }
+ /*
+ * send PLOGI automatically
+ * if initiator
+ */
+ efc_node_init_device(newnode, true);
+ }
+
+ if (node->nport->enable_ini && newnode->targ) {
+ efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
+ NULL);
+ }
+
+ if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
+ break;
+ }
+ return 0;
+}
+
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /*
+ * Wait for a GIDPT response from the name server. Process the FC_IDs
+ * that are reported by creating new remote ports, as needed.
+ */
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / process GIDPT payload */
+ efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
+ cbdata->els_rsp.len);
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ /* not much we can do; will retry with the next RSCN */
+ node_printf(node, "GID_PT failed to complete\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_ns_idle, NULL);
+ break;
+ }
+
+ /* if receive RSCN here, queue up another discovery processing */
+ case EFC_EVT_RSCN_RCVD: {
+ node_printf(node, "RSCN received during GID_PT processing\n");
+ node->rscn_pending = true;
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Wait for RSCN received events (posted from the fabric controller)
+ * and restart the GIDPT name services query and processing.
+ */
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ if (!node->rscn_pending)
+ break;
+
+ node_printf(node, "RSCN pending, restart discovery\n");
+ node->rscn_pending = false;
+ fallthrough;
+
+ case EFC_EVT_RSCN_RCVD: {
+ /* sm: / send GIDPT */
+ /*
+ * If target RSCN processing is enabled,
+ * and this is target only (not initiator),
+ * and tgt_rscn_delay is non-zero,
+ * then we delay issuing the GID_PT
+ */
+ if (efc->tgt_rscn_delay_msec != 0 &&
+ !node->nport->enable_ini && node->nport->enable_tgt &&
+ enable_target_rscn(efc)) {
+ efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
+ } else {
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
+ NULL);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
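+/* Delay timer expiry: kick the name-server node to (re)issue GID_PT */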
+static void
+gidpt_delay_timer_cb(struct timer_list *t)
+{
+ struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
+
+ del_timer(&node->gidpt_delay_timer);
+
+ efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
+}
+
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ u64 delay_msec, tmp;
+
+		/*
+		 * Compute the delay: default to tgt_rscn_delay, but if the
+		 * time since the last GID_PT is less than tgt_rscn_period,
+		 * use tgt_rscn_period instead.
+		 */
+ delay_msec = efc->tgt_rscn_delay_msec;
+ tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
+ if (tmp < efc->tgt_rscn_period_msec)
+ delay_msec = efc->tgt_rscn_period_msec;
+
+ timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
+ 0);
+ mod_timer(&node->gidpt_delay_timer,
+ jiffies + msecs_to_jiffies(delay_msec));
+
+ break;
+ }
+
+ case EFC_EVT_GIDPT_DELAY_EXPIRED:
+ node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);
+
+ efc_ns_send_gidpt(node);
+ efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
+ break;
+
+ case EFC_EVT_RSCN_RCVD: {
+ efc_log_debug(efc,
+ "RSCN received while in GIDPT delay - no action\n");
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* no need to login to fabric controller, just send SCR */
+ efc_send_scr(node);
+ efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine:
+ * Wait for an SCR response from the fabric controller.
+ */
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
+{
+ struct efc *efc = node->efc;
+ struct efc_nport *nport = node->nport;
+ struct efc_node *ns;
+
+ /* Forward this event to the name-services node */
+ ns = efc_node_find(nport, FC_FID_DIR_SERV);
+ if (ns)
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
+ else
+ efc_log_warn(efc, "can't find name server node\n");
+}
+
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * Fabric controller node state machine: Ready.
+ * In this state, the fabric controller sends a RSCN, which is received
+ * by this node and is forwarded to the name services node object; and
+ * the RSCN LS_ACC is sent.
+ */
+ switch (evt) {
+ case EFC_EVT_RSCN_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * sm: / process RSCN (forward to name services node),
+ * send LS_ACC
+ */
+ efc_process_rscn(node, cbdata);
+ efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
+ NULL);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ efc_node_transition(node, __efc_fabctl_ready, NULL);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+static u64
+efc_get_wwpn(struct fc_els_flogi *sp)
+{
+	return be64_to_cpu(sp->fl_wwpn);
+}
+
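+/*
+ * Compare the remote FLOGI WWPN with the local WWPN to pick the
+ * point-to-point "winner": returns 1 if the remote port wins, 0 if the
+ * local port wins, and -1 if the WWPNs match (no winner can be determined).
+ */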
+static int
+efc_rnode_is_winner(struct efc_nport *nport)
+{
+ struct fc_els_flogi *remote_sp;
+ u64 remote_wwpn;
+ u64 local_wwpn = nport->wwpn;
+ u64 wwn_bump = 0;
+
+ remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
+ remote_wwpn = efc_get_wwpn(remote_sp);
+
+ local_wwpn ^= wwn_bump;
+
+ efc_log_debug(nport->efc, "r: %llx\n",
+ be64_to_cpu(remote_sp->fl_wwpn));
+ efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);
+
+ if (remote_wwpn == local_wwpn) {
+ efc_log_warn(nport->efc,
+ "WWPN of remote node [%08x %08x] matches local WWPN\n",
+ (u32)(local_wwpn >> 32ll),
+ (u32)local_wwpn);
+ return -1;
+ }
+
+ return (remote_wwpn > local_wwpn);
+}
+
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK: {
+ struct efc_nport *nport = node->nport;
+ struct efc_node *rnode;
+
+ /*
+ * this transient node (SID=0 (recv'd FLOGI)
+ * or DID=fabric (sent FLOGI))
+ * is the p2p winner, will use a separate node
+ * to send PLOGI to peer
+ */
+ WARN_ON(!node->nport->p2p_winner);
+
+ rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
+ if (rnode) {
+ /*
+ * the "other" transient p2p node has
+ * already kicked off the
+ * new node from which PLOGI is sent
+ */
+ node_printf(node,
+ "Node with fc_id x%x already exists\n",
+ rnode->rnode.fc_id);
+ } else {
+ /*
+ * create new node (SID=1, DID=2)
+ * from which to send PLOGI
+ */
+ rnode = efc_node_alloc(nport,
+ nport->p2p_remote_port_id,
+ false, false);
+ if (!rnode) {
+ efc_log_err(efc, "node alloc failed\n");
+ return;
+ }
+
+ efc_fabric_notify_topology(node);
+ /* sm: / allocate p2p remote node */
+ efc_node_transition(rnode, __efc_p2p_rnode_init,
+ NULL);
+ }
+
+ /*
+ * the transient node (SID=0 or DID=fabric)
+ * has served its purpose
+ */
+ if (node->rnode.fc_id == 0) {
+ /*
+ * if this is the SID=0 node,
+ * move to the init state in case peer
+ * has restarted FLOGI discovery and FLOGI is pending
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ } else {
+ /*
+ * if this is the DID=fabric node
+ * (we initiated FLOGI), shut it down
+ */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ }
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ /* sm: / send PLOGI */
+ efc_send_plogi(node);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
+ break;
+
+ case EFC_EVT_ABTS_RCVD:
+ /* sm: send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+
+ /* sm: if p2p_winner / domain_attach */
+ if (node->nport->p2p_winner) {
+ efc_node_transition(node,
+ __efc_p2p_wait_domain_attach,
+ NULL);
+ if (!node->nport->domain->attached) {
+ node_printf(node, "Domain not attached\n");
+ efc_domain_attach(node->nport->domain,
+ node->nport->p2p_port_id);
+ } else {
+ node_printf(node, "Domain already attached\n");
+ efc_node_post_event(node,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ NULL);
+ }
+ } else {
+ /* this node has served its purpose;
+ * we'll expect a PLOGI on a separate
+ * node (remote SID=0x1); return this node
+ * to init state in case peer
+ * restarts discovery -- it may already
+ * have (pending frames may exist).
+ */
+ /* don't send PLOGI on efc_d_init entry */
+ efc_node_init_device(node, false);
+ }
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ /*
+ * LS_ACC failed, possibly due to link down;
+ * shutdown node and wait
+ * for FLOGI discovery to restart
+ */
+ node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
+ WARN_ON(!node->els_cmpl_cnt);
+ node->els_cmpl_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_ABTS_RCVD: {
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_SRRS_ELS_REQ_OK: {
+ int rc;
+
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: {
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ node_printf(node, "PLOGI failed, shutting down\n");
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+ /* if we're in external loopback mode, just send LS_ACC */
+ if (node->efc->external_loopback) {
+ efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
+ } else {
+ /*
+ * if this isn't external loopback,
+ * pass to default handler
+ */
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+ break;
+ }
+ case EFC_EVT_PRLI_RCVD:
+ /* I, or I+T */
+ /* sent PLOGI and before completion was seen, received the
+ * PRLI from the remote node (WCQEs and RCQEs come in on
+ * different queues and order of processing cannot be assumed)
+ * Save OXID so PRLI can be sent after the attach and continue
+ * to wait for PLOGI response
+ */
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
+ NULL);
+ break;
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+		/*
+		 * Since we've received a PRLI, we have a port login; we only
+		 * need to wait for the PLOGI response to do the node attach,
+		 * after which we can send the LS_ACC for the PRLI. During
+		 * this time we may receive FCP_CMNDs (possible since we've
+		 * already sent a PRLI and our peer may have accepted it).
+		 * We are not waiting on any other unsolicited frames to
+		 * continue the login process, so it does not hurt to hold
+		 * frames here.
+		 */
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */
+ int rc;
+
+ /* Completion from PLOGI sent */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ /* sm: / save sparams, efc_node_attach */
+ efc_node_save_sparms(node, cbdata->els_rsp.virt);
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
+ NULL);
+ break;
+ }
+ case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ /* PLOGI failed, shutdown the node */
+ if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
+ __efc_fabric_common, __func__)) {
+ return;
+ }
+ WARN_ON(!node->els_req_cnt);
+ node->els_req_cnt--;
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node_cb *cbdata = arg;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ switch (node->send_ls_acc) {
+ case EFC_NODE_SEND_LS_ACC_PRLI: {
+ efc_d_send_prli_rsp(node->ls_acc_io,
+ node->ls_acc_oxid);
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ break;
+ }
+ case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
+ case EFC_NODE_SEND_LS_ACC_NONE:
+ default:
+ /* Normal case for I */
+ /* sm: send_plogi_acc is not set / send PLOGI acc */
+ efc_node_transition(node, __efc_d_port_logged_in,
+ NULL);
+ break;
+ }
+ break;
+
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ /* node attach failed, shutdown the node */
+ node->attached = false;
+ node_printf(node, "Node attach failed\n");
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_fabric_initiate_shutdown(node);
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ efc_node_transition(node,
+ __efc_fabric_wait_attach_evt_shutdown,
+ NULL);
+ break;
+ case EFC_EVT_PRLI_RCVD:
+ node_printf(node, "%s: PRLI received before node is attached\n",
+ efc_sm_event_name(evt));
+ efc_process_prli_payload(node, cbdata->payload->dma.virt);
+ efc_send_ls_acc_after_attach(node,
+ cbdata->header->dma.virt,
+ EFC_NODE_SEND_LS_ACC_PRLI);
+ break;
+
+ default:
+ __efc_fabric_common(__func__, ctx, evt, arg);
+ }
+}
+
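+/*
+ * Record the outcome of the point-to-point winner election in the nport:
+ * the winner takes N_Port ID 1 and assigns ID 2 to the peer; in external
+ * loopback mode both ends use ID 1.
+ */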
+int
+efc_p2p_setup(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ int rnode_winner;
+
+ rnode_winner = efc_rnode_is_winner(nport);
+
+ /* set nport flags to indicate p2p "winner" */
+ if (rnode_winner == 1) {
+ nport->p2p_remote_port_id = 0;
+ nport->p2p_port_id = 0;
+ nport->p2p_winner = false;
+ } else if (rnode_winner == 0) {
+ nport->p2p_remote_port_id = 2;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ /* no winner; only okay if external loopback enabled */
+ if (nport->efc->external_loopback) {
+ /*
+ * External loopback mode enabled;
+ * local nport and remote node
+ * will be registered with an NPortID = 1;
+ */
+ efc_log_debug(efc,
+ "External loopback mode enabled\n");
+ nport->p2p_remote_port_id = 1;
+ nport->p2p_port_id = 1;
+ nport->p2p_winner = true;
+ } else {
+ efc_log_warn(efc,
+ "failed to determine p2p winner\n");
+ return rnode_winner;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/scsi/elx/libefc/efc_fabric.h b/drivers/scsi/elx/libefc/efc_fabric.h
new file mode 100644
index 000000000000..b0947ae6fdca
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_fabric.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Declarations for the interface exported by efc_fabric
+ */
+
+#ifndef __EFCT_FABRIC_H__
+#define __EFCT_FABRIC_H__
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+
+void
+__efc_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_nport_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_logo_wait_rsp(struct efc_sm_ctx *ctx,
+			enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
+void
+__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_ready(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_fabric_idle(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_domain_attach_wait(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_p2p_setup(struct efc_nport *nport);
+void
+efc_fabric_set_topology(struct efc_node *node,
+ enum efc_nport_topology topology);
+void efc_fabric_notify_topology(struct efc_node *node);
+
+#endif /* __EFCT_FABRIC_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_node.c b/drivers/scsi/elx/libefc/efc_node.c
new file mode 100644
index 000000000000..a1b4ce6a27b4
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.c
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#include "efc.h"
+
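+/* HW remote node event callback: forward the event to the node state machine */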
+int
+efc_remote_node_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_remote_node *rnode = data;
+ struct efc_node *node = rnode->node;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return 0;
+}
+
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 port_id)
+{
+ /* Find an FC node structure given the FC port ID */
+ return xa_load(&nport->lookup, port_id);
+}
+
+static void
+_efc_node_free(struct kref *arg)
+{
+ struct efc_node *node = container_of(arg, struct efc_node, ref);
+ struct efc *efc = node->efc;
+ struct efc_dma *dma;
+
+ dma = &node->sparm_dma_buf;
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+ memset(dma, 0, sizeof(struct efc_dma));
+ mempool_free(node, efc->node_pool);
+}
+
+struct efc_node *efc_node_alloc(struct efc_nport *nport,
+ u32 port_id, bool init, bool targ)
+{
+ int rc;
+ struct efc_node *node = NULL;
+ struct efc *efc = nport->efc;
+ struct efc_dma *dma;
+
+ if (nport->shutting_down) {
+ efc_log_debug(efc, "node allocation when shutting down %06x",
+ port_id);
+ return NULL;
+ }
+
+ node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
+ if (!node) {
+ efc_log_err(efc, "node allocation failed %06x", port_id);
+ return NULL;
+ }
+ memset(node, 0, sizeof(*node));
+
+ dma = &node->sparm_dma_buf;
+ dma->size = NODE_SPARAMS_SIZE;
+ dma->virt = dma_pool_zalloc(efc->node_dma_pool, GFP_ATOMIC, &dma->phys);
+ if (!dma->virt) {
+ efc_log_err(efc, "node dma alloc failed\n");
+ goto dma_fail;
+ }
+ node->rnode.indicator = U32_MAX;
+ node->nport = nport;
+
+ node->efc = efc;
+ node->init = init;
+ node->targ = targ;
+
+ spin_lock_init(&node->pend_frames_lock);
+ INIT_LIST_HEAD(&node->pend_frames);
+ spin_lock_init(&node->els_ios_lock);
+ INIT_LIST_HEAD(&node->els_ios_list);
+ node->els_io_enabled = true;
+
+ rc = efc_cmd_node_alloc(efc, &node->rnode, port_id, nport);
+ if (rc) {
+ efc_log_err(efc, "efc_hw_node_alloc failed: %d\n", rc);
+ goto hw_alloc_fail;
+ }
+
+ node->rnode.node = node;
+ node->sm.app = node;
+ node->evtdepth = 0;
+
+ efc_node_update_display_name(node);
+
+ rc = xa_err(xa_store(&nport->lookup, port_id, node, GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Node lookup store failed: %d\n", rc);
+ goto xa_fail;
+ }
+
+ /* initialize refcount */
+ kref_init(&node->ref);
+ node->release = _efc_node_free;
+ kref_get(&nport->ref);
+
+ return node;
+
+xa_fail:
+ efc_node_free_resources(efc, &node->rnode);
+hw_alloc_fail:
+ dma_pool_free(efc->node_dma_pool, dma->virt, dma->phys);
+dma_fail:
+ mempool_free(node, efc->node_pool);
+ return NULL;
+}
+
+void
+efc_node_free(struct efc_node *node)
+{
+ struct efc_nport *nport;
+ struct efc *efc;
+ int rc = 0;
+ struct efc_node *ns = NULL;
+
+ nport = node->nport;
+ efc = node->efc;
+
+ node_printf(node, "Free'd\n");
+
+ if (node->refound) {
+ /*
+ * Save the name server node. We will send a fake RSCN event at
+ * the end to handle RSCN events ignored during node deletion.
+ */
+ ns = efc_node_find(node->nport, FC_FID_DIR_SERV);
+ }
+
+ if (!node->nport) {
+ efc_log_err(efc, "Node already Freed\n");
+ return;
+ }
+
+ /* Free HW resources */
+ rc = efc_node_free_resources(efc, &node->rnode);
+ if (rc < 0)
+ efc_log_err(efc, "efc_hw_node_free failed: %d\n", rc);
+
+ /* if the gidpt_delay_timer is still running, then delete it */
+ if (timer_pending(&node->gidpt_delay_timer))
+ del_timer(&node->gidpt_delay_timer);
+
+ xa_erase(&nport->lookup, node->rnode.fc_id);
+
+ /*
+ * If the node list is empty,
+ * then post an ALL_CHILD_NODES_FREE event to the nport,
+ * after the lock is released.
+ * The nport may be free'd as a result of the event.
+ */
+ if (xa_empty(&nport->lookup))
+ efc_sm_post_event(&nport->sm, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ node->nport = NULL;
+ node->sm.current_state = NULL;
+
+ kref_put(&nport->ref, nport->release);
+ kref_put(&node->ref, node->release);
+
+ if (ns) {
+ /* sending fake RSCN event to name server node */
+ efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, NULL);
+ }
+}
+
+static void
+efc_dma_copy_in(struct efc_dma *dma, void *buffer, u32 buffer_length)
+{
+ if (!dma || !buffer || !buffer_length)
+ return;
+
+ if (buffer_length > dma->size)
+ buffer_length = dma->size;
+
+ memcpy(dma->virt, buffer, buffer_length);
+ dma->len = buffer_length;
+}
+
+int
+efc_node_attach(struct efc_node *node)
+{
+ int rc = 0;
+ struct efc_nport *nport = node->nport;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = node->efc;
+
+ if (!domain->attached) {
+ efc_log_err(efc, "Warning: unattached domain\n");
+ return -EIO;
+ }
+ /* Update node->wwpn/wwnn */
+
+ efc_node_build_eui_name(node->wwpn, sizeof(node->wwpn),
+ efc_node_get_wwpn(node));
+ efc_node_build_eui_name(node->wwnn, sizeof(node->wwnn),
+ efc_node_get_wwnn(node));
+
+ efc_dma_copy_in(&node->sparm_dma_buf, node->service_params + 4,
+ sizeof(node->service_params) - 4);
+
+ /* take lock to protect node->rnode.attached */
+ rc = efc_cmd_node_attach(efc, &node->rnode, &node->sparm_dma_buf);
+ if (rc < 0)
+ efc_log_debug(efc, "efc_hw_node_attach failed: %d\n", rc);
+
+ return rc;
+}
+
+void
+efc_node_fcid_display(u32 fc_id, char *buffer, u32 buffer_length)
+{
+ switch (fc_id) {
+ case FC_FID_FLOGI:
+ snprintf(buffer, buffer_length, "fabric");
+ break;
+ case FC_FID_FCTRL:
+ snprintf(buffer, buffer_length, "fabctl");
+ break;
+ case FC_FID_DIR_SERV:
+ snprintf(buffer, buffer_length, "nserve");
+ break;
+ default:
+ if (fc_id == FC_FID_DOM_MGR) {
+ snprintf(buffer, buffer_length, "dctl%02x",
+ (fc_id & 0x0000ff));
+ } else {
+ snprintf(buffer, buffer_length, "%06x", fc_id);
+ }
+ break;
+ }
+}
+
+void
+efc_node_update_display_name(struct efc_node *node)
+{
+ u32 port_id = node->rnode.fc_id;
+ struct efc_nport *nport = node->nport;
+ char portid_display[16];
+
+ efc_node_fcid_display(port_id, portid_display, sizeof(portid_display));
+
+ snprintf(node->display_name, sizeof(node->display_name), "%s.%s",
+ nport->display_name, portid_display);
+}
+
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node)
+{
+ if (node->send_ls_acc != EFC_NODE_SEND_LS_ACC_NONE) {
+ efc_log_debug(node->efc, "[%s] cleaning up LS_ACC oxid=0x%x\n",
+ node->display_name, node->ls_acc_oxid);
+
+ node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
+ node->ls_acc_io = NULL;
+ }
+}
+
+static void efc_node_handle_implicit_logo(struct efc_node *node)
+{
+ int rc;
+
+ /*
+ * currently, only case for implicit logo is PLOGI
+ * recvd. Thus, node's ELS IO pending list won't be
+ * empty (PLOGI will be on it)
+ */
+ WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
+ node_printf(node, "Reason: implicit logout, re-authenticate\n");
+
+ /* Re-attach node with the same HW node resources */
+ node->req_free = false;
+ rc = efc_node_attach(node);
+ efc_node_transition(node, __efc_d_wait_node_attach, NULL);
+ node->els_io_enabled = true;
+
+ if (rc < 0)
+ efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL, NULL);
+}
+
+static void efc_node_handle_explicit_logo(struct efc_node *node)
+{
+ s8 pend_frames_empty;
+ unsigned long flags = 0;
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+ pend_frames_empty = list_empty(&node->pend_frames);
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ /*
+ * there are two scenarios where we want to keep
+ * this node alive:
+ * 1. there are pending frames that need to be
+ * processed or
+ * 2. we're an initiator and the remote node is
+ * a target and we need to re-authenticate
+ */
+ node_printf(node, "Shutdown: explicit logo pend=%d ", !pend_frames_empty);
+ node_printf(node, "nport.ini=%d node.tgt=%d\n",
+ node->nport->enable_ini, node->targ);
+ if (!pend_frames_empty || (node->nport->enable_ini && node->targ)) {
+ u8 send_plogi = false;
+
+ if (node->nport->enable_ini && node->targ) {
+ /*
+ * we're an initiator and
+ * node shutting down is a target;
+ * we'll need to re-authenticate in
+ * initial state
+ */
+ send_plogi = true;
+ }
+
+ /*
+ * transition to __efc_d_init
+ * (will retain HW node resources)
+ */
+ node->els_io_enabled = true;
+ node->req_free = false;
+
+ /*
+ * either pending frames exist or we are re-authenticating
+ * with PLOGI (or both); in either case, return to initial
+ * state
+ */
+ efc_node_init_device(node, send_plogi);
+ }
+ /* else: let node shutdown occur */
+}
+
+static void
+efc_node_purge_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &node->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+}
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ efc_node_hold_frames(node);
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ /* by default, we will be freeing node after we unwind */
+ node->req_free = true;
+
+ switch (node->shutdown_reason) {
+ case EFC_NODE_SHUTDOWN_IMPLICIT_LOGO:
+ /* Node shutdown b/c of PLOGI received when node
+ * already logged in. We have PLOGI service
+ * parameters, so submit node attach; we won't be
+ * freeing this node
+ */
+
+ efc_node_handle_implicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_EXPLICIT_LOGO:
+ efc_node_handle_explicit_logo(node);
+ break;
+
+ case EFC_NODE_SHUTDOWN_DEFAULT:
+ default: {
+ /*
+ * shutdown due to link down,
+ * node going away (xport event) or
+ * nport shutdown, purge pending and
+ * proceed to cleanup node
+ */
+
+ /* cleanup any pending LS_ACC ELSs */
+ efc_node_send_ls_io_cleanup(node);
+
+ node_printf(node,
+ "Shutdown reason: default, purge pending\n");
+ efc_node_purge_pending(node);
+ break;
+ }
+ }
+
+ break;
+ }
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+static bool
+efc_node_check_els_quiesced(struct efc_node *node)
+{
+ /* check to see if ELS requests, completions are quiesced */
+ if (node->els_req_cnt == 0 && node->els_cmpl_cnt == 0 &&
+ efc_els_io_list_empty(node, &node->els_ios_list)) {
+ if (!node->attached) {
+ /* hw node detach already completed, proceed */
+ node_printf(node, "HW node not attached\n");
+ efc_node_transition(node,
+ __efc_node_wait_ios_shutdown,
+ NULL);
+ } else {
+ /*
+ * hw node detach hasn't completed,
+ * transition and wait
+ */
+ node_printf(node, "HW node still attached\n");
+ efc_node_transition(node, __efc_node_wait_node_free,
+ NULL);
+ }
+ return true;
+ }
+ return false;
+}
+
+void
+efc_node_initiate_cleanup(struct efc_node *node)
+{
+ /*
+ * if ELS's have already been quiesced, will move to next state
+ * if ELS's have not been quiesced, abort them
+ */
+ if (!efc_node_check_els_quiesced(node)) {
+ efc_node_hold_frames(node);
+ efc_node_transition(node, __efc_node_wait_els_shutdown, NULL);
+ }
+}
+
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ bool check_quiesce = false;
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+ /* Node state machine: Wait for all ELSs to complete */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ if (efc_els_io_list_empty(node, &node->els_ios_list)) {
+ node_printf(node, "All ELS IOs complete\n");
+ check_quiesce = true;
+ }
+ break;
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ /* all ELS IO's complete */
+ node_printf(node, "All ELS IOs complete\n");
+ WARN_ON(!efc_els_io_list_empty(node, &node->els_ios_list));
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ check_quiesce = true;
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+
+ if (check_quiesce)
+ efc_node_check_els_quiesced(node);
+}
+
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ /* node is officially no longer attached */
+ node->attached = false;
+ efc_node_transition(node, __efc_node_wait_ios_shutdown, NULL);
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ /* As IOs and ELS IO's complete we expect to get these events */
+ break;
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ node_printf(node, "%s received\n", efc_sm_event_name(evt));
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+ struct efc *efc = node->efc;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ efc_node_hold_frames(node);
+
+ /* first check to see if no ELS IOs are outstanding */
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+ /* No ELS IOs outstanding; proceed to node shutdown */
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ if (efc_els_io_list_empty(node, &node->els_ios_list))
+ efc_node_transition(node, __efc_node_shutdown, NULL);
+ break;
+
+ case EFC_EVT_EXIT:
+ efc_node_accept_frames(node);
+ break;
+
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ /* Can happen as ELS IOs complete */
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ /* ignore shutdown events as we're already in shutdown path */
+ case EFC_EVT_SHUTDOWN:
+ /* have default shutdown event take precedence */
+ node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
+ fallthrough;
+
+ case EFC_EVT_SHUTDOWN_EXPLICIT_LOGO:
+ case EFC_EVT_SHUTDOWN_IMPLICIT_LOGO:
+ efc_log_debug(efc, "[%s] %-20s\n", node->display_name,
+ efc_sm_event_name(evt));
+ break;
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ /* don't care about domain_attach_ok */
+ break;
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = NULL;
+ struct efc *efc = NULL;
+ struct efc_node_cb *cbdata = arg;
+
+ node = ctx->app;
+ efc = node->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_NPORT_TOPOLOGY_NOTIFY:
+ case EFC_EVT_NODE_MISSING:
+ case EFC_EVT_FCP_CMD_RCVD:
+ break;
+
+ case EFC_EVT_NODE_REFOUND:
+ node->refound = true;
+ break;
+
+ /*
+ * node->attached must be set appropriately
+ * for all node attach/detach events
+ */
+ case EFC_EVT_NODE_ATTACH_OK:
+ node->attached = true;
+ break;
+
+ case EFC_EVT_NODE_FREE_OK:
+ case EFC_EVT_NODE_ATTACH_FAIL:
+ node->attached = false;
+ break;
+
+ /*
+ * handle any ELS completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_CMPL_OK:
+ case EFC_EVT_SRRS_ELS_CMPL_FAIL:
+ if (WARN_ON(!node->els_cmpl_cnt))
+ break;
+ node->els_cmpl_cnt--;
+ break;
+
+ /*
+ * handle any ELS request completions that
+ * other states either didn't care about
+ * or forgot about
+ */
+ case EFC_EVT_SRRS_ELS_REQ_OK:
+ case EFC_EVT_SRRS_ELS_REQ_FAIL:
+ case EFC_EVT_SRRS_ELS_REQ_RJT:
+ case EFC_EVT_ELS_REQ_ABORTED:
+ if (WARN_ON(!node->els_req_cnt))
+ break;
+ node->els_req_cnt--;
+ break;
+
+ case EFC_EVT_ELS_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /*
+ * Unsupported ELS was received,
+ * send LS_RJT, command not supported
+ */
+ efc_log_debug(efc,
+ "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
+ node->display_name, funcname,
+ ((u8 *)cbdata->payload->dma.virt)[0]);
+
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNSUP, ELS_EXPL_NONE, 0);
+ break;
+ }
+
+ case EFC_EVT_PLOGI_RCVD:
+ case EFC_EVT_FLOGI_RCVD:
+ case EFC_EVT_LOGO_RCVD:
+ case EFC_EVT_PRLI_RCVD:
+ case EFC_EVT_PRLO_RCVD:
+ case EFC_EVT_PDISC_RCVD:
+ case EFC_EVT_FDISC_RCVD:
+ case EFC_EVT_ADISC_RCVD:
+ case EFC_EVT_RSCN_RCVD:
+ case EFC_EVT_SCR_RCVD: {
+ struct fc_frame_header *hdr = cbdata->header->dma.virt;
+
+ /* sm: / send ELS_RJT */
+ efc_log_debug(efc, "[%s] (%s) %s sending ELS_RJT\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ /* if we didn't catch this in a state, send generic LS_RJT */
+ efc_send_ls_rjt(node, be16_to_cpu(hdr->fh_ox_id),
+ ELS_RJT_UNAB, ELS_EXPL_NONE, 0);
+ break;
+ }
+ case EFC_EVT_ABTS_RCVD: {
+ efc_log_debug(efc, "[%s] (%s) %s sending BA_ACC\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+
+ /* sm: / send BA_ACC */
+ efc_send_bls_acc(node, cbdata->header->dma.virt);
+ break;
+ }
+
+ default:
+ efc_log_debug(node->efc, "[%s] %-20s %-20s not handled\n",
+ node->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+efc_node_save_sparms(struct efc_node *node, void *payload)
+{
+ memcpy(node->service_params, payload, sizeof(node->service_params));
+}
+
+void
+efc_node_post_event(struct efc_node *node,
+ enum efc_sm_event evt, void *arg)
+{
+ bool free_node = false;
+
+ node->evtdepth++;
+
+ efc_sm_post_event(&node->sm, evt, arg);
+
+ /* If our event call depth is one and
+ * we're not holding frames
+ * then we can dispatch any pending frames.
+ * We don't want to allow the efc_process_node_pending()
+ * call to recurse.
+ */
+ if (!node->hold_frames && node->evtdepth == 1)
+ efc_process_node_pending(node);
+
+ node->evtdepth--;
+
+ /*
+ * Free the node object if so requested,
+ * and we're at an event call depth of zero
+ */
+ if (node->evtdepth == 0 && node->req_free)
+ free_node = true;
+
+ if (free_node)
+ efc_node_free(node);
+}
+
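+/*
+ * Event depth sketch (illustrative): efc_node_post_event() only dispatches
+ * pending frames from the outermost call, so a handler that posts a nested
+ * event cannot recurse into efc_process_node_pending():
+ *
+ *   efc_node_post_event(node, evt, arg)     evtdepth 0 -> 1
+ *     handler posts a nested event          evtdepth 1 -> 2, no dispatch
+ *   back in the outer call                  pending frames dispatched once
+ *                                           (if frames are not held)
+ *   node freed only when evtdepth returns to 0 and req_free is set
+ */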
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+{
+ struct efc_sm_ctx *ctx = &node->sm;
+
+ if (ctx->current_state == state) {
+ efc_node_post_event(node, EFC_EVT_REENTER, data);
+ } else {
+ efc_node_post_event(node, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_node_post_event(node, EFC_EVT_ENTER, data);
+ }
+}
+
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name)
+{
+ memset(buf, 0, buf_len);
+
+ snprintf(buf, buf_len, "eui.%016llX", (unsigned long long)eui_name);
+}
+
+u64
+efc_node_get_wwpn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwpn);
+}
+
+u64
+efc_node_get_wwnn(struct efc_node *node)
+{
+ struct fc_els_flogi *sp =
+ (struct fc_els_flogi *)node->service_params;
+
+ return be64_to_cpu(sp->fl_wwnn);
+}
+
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname)
+{
+ return 0;
+}
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list)
+{
+ int empty;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&node->els_ios_lock, flags);
+ empty = list_empty(list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
+ return empty;
+}
+
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *))
+
+{
+ node->nodedb_state = state;
+ efc_node_transition(node, __efc_node_paused, NULL);
+}
+
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_node *node = ctx->app;
+
+ efc_node_evt_set(ctx, evt, __func__);
+
+ node_sm_trace();
+
+ /*
+ * This state is entered when a node is paused. When resumed, the
+ * node is transitioned to the previously saved state (node->nodedb_state).
+ */
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ node_printf(node, "Paused\n");
+ break;
+
+ case EFC_EVT_RESUME: {
+ void (*pf)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+ pf = node->nodedb_state;
+
+ node->nodedb_state = NULL;
+ efc_node_transition(node, pf, NULL);
+ break;
+ }
+
+ case EFC_EVT_DOMAIN_ATTACH_OK:
+ break;
+
+ case EFC_EVT_SHUTDOWN:
+ node->req_free = true;
+ break;
+
+ default:
+ __efc_node_common(__func__, ctx, evt, arg);
+ }
+}
+
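+/*
+ * Pause/resume sketch (illustrative; __some_saved_state is a placeholder,
+ * not a real handler):
+ *
+ *   efc_node_pause(node, __some_saved_state);
+ *   ...work that must finish before the node may continue...
+ *   efc_node_post_event(node, EFC_EVT_RESUME, NULL);
+ *
+ * On EFC_EVT_RESUME, __efc_node_paused() transitions the node back to
+ * __some_saved_state.
+ */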
+void
+efc_node_recv_els_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ u32 prli_size = sizeof(struct fc_els_prli) + sizeof(struct fc_els_spp);
+ struct {
+ u32 cmd;
+ enum efc_sm_event evt;
+ u32 payload_size;
+ } els_cmd_list[] = {
+ {ELS_PLOGI, EFC_EVT_PLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_FLOGI, EFC_EVT_FLOGI_RCVD, sizeof(struct fc_els_flogi)},
+ {ELS_LOGO, EFC_EVT_LOGO_RCVD, sizeof(struct fc_els_ls_acc)},
+ {ELS_PRLI, EFC_EVT_PRLI_RCVD, prli_size},
+ {ELS_PRLO, EFC_EVT_PRLO_RCVD, prli_size},
+ {ELS_PDISC, EFC_EVT_PDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_FDISC, EFC_EVT_FDISC_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_ADISC, EFC_EVT_ADISC_RCVD, sizeof(struct fc_els_adisc)},
+ {ELS_RSCN, EFC_EVT_RSCN_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ {ELS_SCR, EFC_EVT_SCR_RCVD, MAX_ACC_REJECT_PAYLOAD},
+ };
+ struct efc_node_cb cbdata;
+ u8 *buf = seq->payload->dma.virt;
+ enum efc_sm_event evt = EFC_EVT_ELS_RCVD;
+ u32 i;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ /* find a matching event for the ELS command */
+ for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
+ if (els_cmd_list[i].cmd == buf[0]) {
+ evt = els_cmd_list[i].evt;
+ break;
+ }
+ }
+
+ efc_node_post_event(node, evt, &cbdata);
+}
+
+void
+efc_node_recv_ct_frame(struct efc_node *node,
+ struct efc_hw_sequence *seq)
+{
+ struct fc_ct_hdr *iu = seq->payload->dma.virt;
+ struct fc_frame_header *hdr = seq->header->dma.virt;
+ struct efc *efc = node->efc;
+ u16 gscmd = be16_to_cpu(iu->ct_cmd);
+
+ efc_log_err(efc, "[%s] Received cmd :%x sending CT_REJECT\n",
+ node->display_name, gscmd);
+ efc_send_ct_rsp(efc, node, be16_to_cpu(hdr->fh_ox_id), iu,
+ FC_FS_RJT, FC_FS_RJT_UNSUP, 0);
+}
+
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq)
+{
+ struct efc_node_cb cbdata;
+
+ memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.header = seq->header;
+ cbdata.payload = seq->payload;
+
+ efc_node_post_event(node, EFC_EVT_FCP_CMD_RCVD, &cbdata);
+}
+
+void
+efc_process_node_pending(struct efc_node *node)
+{
+ struct efc *efc = node->efc;
+ struct efc_hw_sequence *seq = NULL;
+ u32 pend_frames_processed = 0;
+ unsigned long flags = 0;
+
+ for (;;) {
+ /* need to check for hold frames condition after each frame
+ * processed because any given frame could cause a transition
+ * to a state that holds frames
+ */
+ if (node->hold_frames)
+ break;
+
+ seq = NULL;
+ /* Get next frame/sequence */
+ spin_lock_irqsave(&node->pend_frames_lock, flags);
+
+ if (!list_empty(&node->pend_frames)) {
+ seq = list_first_entry(&node->pend_frames,
+ struct efc_hw_sequence, list_entry);
+ list_del(&seq->list_entry);
+ }
+ spin_unlock_irqrestore(&node->pend_frames_lock, flags);
+
+ if (!seq) {
+ pend_frames_processed = node->pend_frames_processed;
+ node->pend_frames_processed = 0;
+ break;
+ }
+ node->pend_frames_processed++;
+
+ /* now dispatch frame(s) to dispatch function */
+ efc_node_dispatch_frame(node, seq);
+ efc->tt.hw_seq_free(efc, seq);
+ }
+
+ if (pend_frames_processed != 0)
+ efc_log_debug(efc, "%u node frames held and processed\n",
+ pend_frames_processed);
+}
+
+void
+efc_scsi_sess_reg_complete(struct efc_node *node, u32 status)
+{
+ unsigned long flags = 0;
+ enum efc_sm_event evt = EFC_EVT_NODE_SESS_REG_OK;
+ struct efc *efc = node->efc;
+
+ if (status)
+ evt = EFC_EVT_NODE_SESS_REG_FAIL;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, evt, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_INI_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ /* Notify the node to resume */
+ efc_node_post_event(node, EFC_EVT_NODE_DEL_TGT_COMPLETE, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void
+efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node)
+{
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg)
+{
+ struct efc *efc = node->efc;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, evt, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+void efc_node_post_shutdown(struct efc_node *node, void *arg)
+{
+ unsigned long flags = 0;
+ struct efc *efc = node->efc;
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, arg);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
diff --git a/drivers/scsi/elx/libefc/efc_node.h b/drivers/scsi/elx/libefc/efc_node.h
new file mode 100644
index 000000000000..e9c600ac45d5
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_node.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#if !defined(__EFC_NODE_H__)
+#define __EFC_NODE_H__
+#include "scsi/fc/fc_ns.h"
+
+#define EFC_NODEDB_PAUSE_FABRIC_LOGIN (1 << 0)
+#define EFC_NODEDB_PAUSE_NAMESERVER (1 << 1)
+#define EFC_NODEDB_PAUSE_NEW_NODES (1 << 2)
+
+#define MAX_ACC_REJECT_PAYLOAD sizeof(struct fc_els_ls_rjt)
+
+#define scsi_io_printf(io, fmt, ...) \
+ efc_log_debug(io->efc, "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt, \
+ io->node->display_name, io->instance_index, io->init_task_tag, \
+ io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
+
+static inline void
+efc_node_evt_set(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
+ const char *handler)
+{
+ struct efc_node *node = ctx->app;
+
+ if (evt == EFC_EVT_ENTER) {
+ strncpy(node->current_state_name, handler,
+ sizeof(node->current_state_name));
+ } else if (evt == EFC_EVT_EXIT) {
+ strncpy(node->prev_state_name, node->current_state_name,
+ sizeof(node->prev_state_name));
+ strncpy(node->current_state_name, "invalid",
+ sizeof(node->current_state_name));
+ }
+ node->prev_evt = node->current_evt;
+ node->current_evt = evt;
+}
+
+/**
+ * hold frames in pending frame list
+ *
+ * Unsolicited receive frames are held on the node pending frame list,
+ * rather than being processed.
+ */
+
+static inline void
+efc_node_hold_frames(struct efc_node *node)
+{
+ node->hold_frames = true;
+}
+
+/**
+ * accept frames
+ *
+ * Unsolicited receive frames are processed rather than being held on the
+ * node pending frame list.
+ */
+
+static inline void
+efc_node_accept_frames(struct efc_node *node)
+{
+ node->hold_frames = false;
+}
+
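+/*
+ * Typical hold/accept pattern (sketch, mirroring the state handlers in
+ * efc_node.c): frames are held on ENTER and released on EXIT; anything
+ * queued while held is later dispatched by efc_process_node_pending():
+ *
+ *   case EFC_EVT_ENTER:
+ *           efc_node_hold_frames(node);
+ *           break;
+ *   case EFC_EVT_EXIT:
+ *           efc_node_accept_frames(node);
+ *           break;
+ */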
+/*
+ * Node initiator/target enable defines
+ * All combinations of the SLI port (nport) initiator/target enable,
+ * and remote node initiator/target enable are enumerated.
+ * e.g. EFC_NODE_ENABLE_T_TO_IT means target mode is enabled on the SLI port,
+ * and the remote node is enabled as both initiator and target (I+T).
+ */
+enum efc_node_enable {
+ EFC_NODE_ENABLE_x_TO_x,
+ EFC_NODE_ENABLE_x_TO_T,
+ EFC_NODE_ENABLE_x_TO_I,
+ EFC_NODE_ENABLE_x_TO_IT,
+ EFC_NODE_ENABLE_T_TO_x,
+ EFC_NODE_ENABLE_T_TO_T,
+ EFC_NODE_ENABLE_T_TO_I,
+ EFC_NODE_ENABLE_T_TO_IT,
+ EFC_NODE_ENABLE_I_TO_x,
+ EFC_NODE_ENABLE_I_TO_T,
+ EFC_NODE_ENABLE_I_TO_I,
+ EFC_NODE_ENABLE_I_TO_IT,
+ EFC_NODE_ENABLE_IT_TO_x,
+ EFC_NODE_ENABLE_IT_TO_T,
+ EFC_NODE_ENABLE_IT_TO_I,
+ EFC_NODE_ENABLE_IT_TO_IT,
+};
+
+static inline enum efc_node_enable
+efc_node_get_enable(struct efc_node *node)
+{
+ u32 retval = 0;
+
+ if (node->nport->enable_ini)
+ retval |= (1U << 3);
+ if (node->nport->enable_tgt)
+ retval |= (1U << 2);
+ if (node->init)
+ retval |= (1U << 1);
+ if (node->targ)
+ retval |= (1U << 0);
+ return (enum efc_node_enable)retval;
+}
+
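+/*
+ * Decoding sketch: the enum value packs the four enables as
+ * (nport ini << 3) | (nport tgt << 2) | (node init << 1) | (node targ << 0).
+ * For example, a target-only local port talking to an initiator+target
+ * remote node yields 0b0111, i.e. EFC_NODE_ENABLE_T_TO_IT.
+ */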
+int
+efc_node_check_els_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u8 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_check_ns_req(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg,
+ u16 cmd, void (*efc_node_common_func)(const char *,
+ struct efc_sm_ctx *, enum efc_sm_event, void *),
+ const char *funcname);
+int
+efc_node_attach(struct efc_node *node);
+struct efc_node *
+efc_node_alloc(struct efc_nport *nport, u32 port_id,
+ bool init, bool targ);
+void
+efc_node_free(struct efc_node *efc);
+void
+efc_node_update_display_name(struct efc_node *node);
+void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt,
+ void *arg);
+
+void
+__efc_node_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+efc_node_save_sparms(struct efc_node *node, void *payload);
+void
+efc_node_transition(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *, enum efc_sm_event,
+ void *), void *data);
+void
+__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+void
+efc_node_initiate_cleanup(struct efc_node *node);
+
+void
+efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name);
+
+void
+efc_node_pause(struct efc_node *node,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg));
+void
+__efc_node_paused(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+int
+efc_node_active_ios_empty(struct efc_node *node);
+void
+efc_node_send_ls_io_cleanup(struct efc_node *node);
+
+int
+efc_els_io_list_empty(struct efc_node *node, struct list_head *list);
+
+void
+efc_process_node_pending(struct efc_node *domain);
+
+u64 efc_node_get_wwnn(struct efc_node *node);
+struct efc_node *
+efc_node_find(struct efc_nport *nport, u32 id);
+void
+efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg);
+void
+efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *s);
+void
+efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq);
+void
+efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq);
+
+#endif /* __EFC_NODE_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_nport.c b/drivers/scsi/elx/libefc/efc_nport.c
new file mode 100644
index 000000000000..2e83a667901f
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * NPORT
+ *
+ * Port object for physical port and NPIV ports.
+ */
+
+/*
+ * NPORT REFERENCE COUNTING
+ *
+ * A nport reference should be taken when:
+ * - an nport is allocated
+ * - a vport populates associated nport
+ * - a remote node is allocated
+ * - an unsolicited frame is processed
+ * The reference should be dropped when:
+ * - the unsolicited frame processing is done
+ * - the remote node is removed
+ * - the vport is removed
+ * - the nport is removed
+ */
+
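+/*
+ * Reference usage sketch: efc_nport_find() takes a reference on the nport it
+ * returns, so a caller is expected to drop it when done:
+ *
+ *   nport = efc_nport_find(domain, d_id);
+ *   if (nport) {
+ *           ...use nport...
+ *           kref_put(&nport->ref, nport->release);
+ *   }
+ */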
+#include "efc.h"
+
+void
+efc_nport_cb(void *arg, int event, void *data)
+{
+ struct efc *efc = arg;
+ struct efc_nport *nport = data;
+ unsigned long flags = 0;
+
+ efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event));
+
+ spin_lock_irqsave(&efc->lock, flags);
+ efc_sm_post_event(&nport->sm, event, NULL);
+ spin_unlock_irqrestore(&efc->lock, flags);
+}
+
+static struct efc_nport *
+efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
+{
+ struct efc_nport *nport = NULL;
+
+ /* Find a nport, given the WWNN and WWPN */
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwnn == wwnn && nport->wwpn == wwpn)
+ return nport;
+ }
+ return NULL;
+}
+
+static void
+_efc_nport_free(struct kref *arg)
+{
+ struct efc_nport *nport = container_of(arg, struct efc_nport, ref);
+
+ kfree(nport);
+}
+
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt)
+{
+ struct efc_nport *nport;
+
+ if (domain->efc->enable_ini)
+ enable_ini = 0;
+
+ /* Return a failure if this nport has already been allocated */
+ if ((wwpn != 0) || (wwnn != 0)) {
+ nport = efc_nport_find_wwn(domain, wwnn, wwpn);
+ if (nport) {
+ efc_log_err(domain->efc,
+ "NPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ return NULL;
+ }
+ }
+
+ nport = kzalloc(sizeof(*nport), GFP_ATOMIC);
+ if (!nport)
+ return nport;
+
+ /* initialize refcount */
+ kref_init(&nport->ref);
+ nport->release = _efc_nport_free;
+
+ nport->efc = domain->efc;
+ snprintf(nport->display_name, sizeof(nport->display_name), "------");
+ nport->domain = domain;
+ xa_init(&nport->lookup);
+ nport->instance_index = domain->nport_count++;
+ nport->sm.app = nport;
+ nport->enable_ini = enable_ini;
+ nport->enable_tgt = enable_tgt;
+ nport->enable_rscn = (nport->enable_ini ||
+ (nport->enable_tgt && enable_target_rscn(nport->efc)));
+
+ /* Copy service parameters from domain */
+ memcpy(nport->service_params, domain->service_params,
+ sizeof(struct fc_els_flogi));
+
+ /* Update requested fc_id */
+ nport->fc_id = fc_id;
+
+ /* Update the nport's service parameters for the new wwn's */
+ nport->wwpn = wwpn;
+ nport->wwnn = wwnn;
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX",
+ (unsigned long long)wwnn);
+
+ /*
+ * if this is the "first" nport of the domain,
+ * then make it the "phys" nport
+ */
+ if (list_empty(&domain->nport_list))
+ domain->nport = nport;
+
+ INIT_LIST_HEAD(&nport->list_entry);
+ list_add_tail(&nport->list_entry, &domain->nport_list);
+
+ kref_get(&domain->ref);
+
+ efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name);
+
+ return nport;
+}
+
+void
+efc_nport_free(struct efc_nport *nport)
+{
+ struct efc_domain *domain;
+
+ if (!nport)
+ return;
+
+ domain = nport->domain;
+ efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name);
+ list_del(&nport->list_entry);
+ /*
+ * if this is the physical nport,
+ * then clear it out of the domain
+ */
+ if (nport == domain->nport)
+ domain->nport = NULL;
+
+ xa_destroy(&nport->lookup);
+ xa_erase(&domain->lookup, nport->fc_id);
+
+ if (list_empty(&domain->nport_list))
+ efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
+ NULL);
+
+ kref_put(&domain->ref, domain->release);
+ kref_put(&nport->ref, nport->release);
+}
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id)
+{
+ struct efc_nport *nport;
+
+ /* Find a nport object, given an FC_ID */
+ nport = xa_load(&domain->lookup, d_id);
+ if (!nport || !kref_get_unless_zero(&nport->ref))
+ return NULL;
+
+ return nport;
+}
+
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id)
+{
+ int rc;
+ struct efc_node *node;
+ struct efc *efc = nport->efc;
+ unsigned long index;
+
+ /* Set our lookup */
+ rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
+ if (rc) {
+ efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
+ return rc;
+ }
+
+ /* Update our display_name */
+ efc_node_fcid_display(fc_id, nport->display_name,
+ sizeof(nport->display_name));
+
+ xa_for_each(&nport->lookup, index, node) {
+ efc_node_update_display_name(node);
+ }
+
+ efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n",
+ nport->display_name, fc_id);
+
+ /* Register a nport, given an FC_ID */
+ rc = efc_cmd_nport_attach(efc, nport, fc_id);
+ if (rc < 0) {
+ efc_log_err(nport->efc,
+ "efc_hw_port_attach failed: %d\n", rc);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
+efc_nport_shutdown(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_node *node;
+ unsigned long index;
+
+ xa_for_each(&nport->lookup, index, node) {
+ if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+ /*
+ * If this is a vport, logout of the fabric
+ * controller so that it deletes the vport
+ * on the switch.
+ */
+ /* if link is down, don't send logo */
+ if (efc->link_status == EFC_LINK_STATUS_DOWN) {
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
+ continue;
+ }
+
+ efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n",
+ node->display_name);
+
+ if (!efc_send_logo(node)) {
+ /* sent LOGO, wait for response */
+ efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
+ continue;
+ }
+
+ /*
+ * failed to send LOGO,
+ * go ahead and cleanup node anyways
+ */
+ node_printf(node, "Failed to send LOGO\n");
+ efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
+ }
+}
+
+static void
+efc_vport_link_down(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+
+ /* Clear the nport reference in the vport specification */
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ kref_put(&nport->ref, nport->release);
+ vport->nport = NULL;
+ break;
+ }
+ }
+}
+
+static void
+__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ switch (evt) {
+ case EFC_EVT_ENTER:
+ case EFC_EVT_REENTER:
+ case EFC_EVT_EXIT:
+ case EFC_EVT_ALL_CHILD_NODES_FREE:
+ break;
+ case EFC_EVT_NPORT_ATTACH_OK:
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ case EFC_EVT_SHUTDOWN:
+ /* Flag this nport as shutting down */
+ nport->shutting_down = true;
+
+ if (nport->is_vport)
+ efc_vport_link_down(nport);
+
+ if (xa_empty(&nport->lookup)) {
+ /* Remove the nport from the domain's lookup table */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free,
+ NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_debug(nport->efc,
+ "efc_hw_port_free failed\n");
+ /* Not much we can do, free the nport anyways */
+ efc_nport_free(nport);
+ }
+ } else {
+ /* sm: node list is not empty / shutdown nodes */
+ efc_sm_transition(ctx,
+ __efc_nport_wait_shutdown, NULL);
+ efc_nport_shutdown(nport);
+ }
+ break;
+ default:
+ efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n",
+ nport->display_name, funcname,
+ efc_sm_event_name(evt));
+ }
+}
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ /* the physical nport is attached */
+ case EFC_EVT_NPORT_ATTACH_OK:
+ WARN_ON(nport != domain->nport);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+
+ case EFC_EVT_NPORT_ALLOC_OK:
+ /* ignore */
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ __be64 be_wwpn = cpu_to_be64(nport->wwpn);
+
+ if (nport->wwpn == 0)
+ efc_log_debug(efc, "vport: letting f/w select WWN\n");
+
+ if (nport->fc_id != U32_MAX) {
+ efc_log_debug(efc, "vport: hard coding port id: %x\n",
+ nport->fc_id);
+ }
+
+ efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL);
+ /* If wwpn is zero, then we'll let the f/w assign wwpn*/
+ if (efc_cmd_nport_alloc(efc, nport, nport->domain,
+ nport->wwpn == 0 ? NULL :
+ (uint8_t *)&be_wwpn)) {
+ efc_log_err(efc, "Can't allocate port\n");
+ break;
+ }
+
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK: {
+ struct fc_els_flogi *sp;
+
+ sp = (struct fc_els_flogi *)nport->service_params;
+
+ if (nport->wwnn == 0) {
+ nport->wwnn = be64_to_cpu(nport->sli_wwnn);
+ nport->wwpn = be64_to_cpu(nport->sli_wwpn);
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ }
+
+ /* Update the nport's service parameters */
+ sp->fl_wwpn = cpu_to_be64(nport->wwpn);
+ sp->fl_wwnn = cpu_to_be64(nport->wwnn);
+
+ /*
+ * if nport->fc_id is uninitialized,
+ * then request that the fabric node use FDISC
+ * to find an fc_id.
+ * Otherwise we're restoring vports, or we're in
+ * fabric emulation mode, so attach the fc_id
+ */
+ if (nport->fc_id == U32_MAX) {
+ struct efc_node *fabric;
+
+ fabric = efc_node_alloc(nport, FC_FID_FLOGI, false,
+ false);
+ if (!fabric) {
+ efc_log_err(efc, "efc_node_alloc() failed\n");
+ return;
+ }
+ efc_node_transition(fabric, __efc_vport_fabric_init,
+ NULL);
+ } else {
+ snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
+ "%016llX", nport->wwpn);
+ efc_nport_attach(nport, nport->fc_id);
+ }
+ efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ /*
+ * This state is entered after the nport is allocated;
+ * it then waits for a fabric node
+ * FDISC to complete, which requests a nport attach.
+ * The nport attach complete is handled in this state.
+ */
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK: {
+ struct efc_node *node;
+
+ /* Find our fabric node, and forward this event */
+ node = efc_node_find(nport, FC_FID_FLOGI);
+ if (!node) {
+ efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI);
+ break;
+ }
+ /* sm: / forward nport attach to fabric node */
+ efc_node_post_event(node, evt, NULL);
+ efc_sm_transition(ctx, __efc_nport_attached, NULL);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static void
+efc_vport_update_spec(struct efc_nport *nport)
+{
+ struct efc *efc = nport->efc;
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if (vport->nport == nport) {
+ vport->wwnn = nport->wwnn;
+ vport->wwpn = nport->wwpn;
+ vport->tgt_data = nport->tgt_data;
+ vport->ini_data = nport->ini_data;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_ENTER: {
+ struct efc_node *node;
+ unsigned long index;
+
+ efc_log_debug(efc,
+ "[%s] NPORT attached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ xa_for_each(&nport->lookup, index, node)
+ efc_node_update_display_name(node);
+
+ efc->tt.new_nport(efc, nport);
+
+ /*
+ * Update the vport parameters (if it's not the
+ * physical nport)
+ */
+ if (nport->is_vport)
+ efc_vport_update_spec(nport);
+ break;
+ }
+
+ case EFC_EVT_EXIT:
+ efc_log_debug(efc,
+ "[%s] NPORT deattached WWPN %016llX WWNN %016llX\n",
+ nport->display_name,
+ nport->wwpn, nport->wwnn);
+
+ efc->tt.del_nport(efc, nport);
+ break;
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+ struct efc_domain *domain = nport->domain;
+ struct efc *efc = nport->efc;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ALLOC_OK:
+ case EFC_EVT_NPORT_ALLOC_FAIL:
+ case EFC_EVT_NPORT_ATTACH_OK:
+ case EFC_EVT_NPORT_ATTACH_FAIL:
+ /* ignore these events - just wait for the all free event */
+ break;
+
+ case EFC_EVT_ALL_CHILD_NODES_FREE: {
+ /*
+ * Remove the nport from the domain's
+ * sparse vector lookup table
+ */
+ xa_erase(&domain->lookup, nport->fc_id);
+ efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL);
+ if (efc_cmd_nport_free(efc, nport)) {
+ efc_log_err(nport->efc, "efc_hw_port_free failed\n");
+ /* Not much we can do, free the nport anyways */
+ efc_nport_free(nport);
+ }
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg)
+{
+ struct efc_nport *nport = ctx->app;
+
+ nport_sm_trace(nport);
+
+ switch (evt) {
+ case EFC_EVT_NPORT_ATTACH_OK:
+ /* Ignore as we are waiting for the free CB */
+ break;
+ case EFC_EVT_NPORT_FREE_OK: {
+ /* All done, free myself */
+ efc_nport_free(nport);
+ break;
+ }
+ default:
+ __efc_nport_common(__func__, ctx, evt, arg);
+ }
+}
+
+static int
+efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport)
+{
+ struct efc_nport *nport;
+
+ lockdep_assert_held(&domain->efc->lock);
+
+ nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
+ vport->enable_ini, vport->enable_tgt);
+ vport->nport = nport;
+ if (!nport)
+ return -EIO;
+
+ kref_get(&nport->ref);
+ nport->is_vport = true;
+ nport->tgt_data = vport->tgt_data;
+ nport->ini_data = vport->ini_data;
+
+ efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL);
+
+ return 0;
+}
+
+int
+efc_vport_start(struct efc_domain *domain)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ /* Use the vport spec to find the associated vports and start them */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (!vport->nport) {
+ if (efc_vport_nport_alloc(domain, vport))
+ rc = -EIO;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ return rc;
+}
+
+int
+efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool ini, bool tgt, void *tgt_data,
+ void *ini_data)
+{
+ struct efc *efc = domain->efc;
+ struct efc_vport *vport;
+ int rc = 0;
+ unsigned long flags = 0;
+
+ if (ini && domain->efc->enable_ini == 0) {
+ efc_log_debug(efc, "driver initiator mode not enabled\n");
+ return -EIO;
+ }
+
+ if (tgt && domain->efc->enable_tgt == 0) {
+ efc_log_debug(efc, "driver target mode not enabled\n");
+ return -EIO;
+ }
+
+ /*
+ * Create a vport spec if we need to recreate
+ * this vport after a link up event
+ */
+ vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt,
+ tgt_data, ini_data);
+ if (!vport) {
+ efc_log_err(efc, "failed to create vport object entry\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ rc = efc_vport_nport_alloc(domain, vport);
+ spin_unlock_irqrestore(&efc->lock, flags);
+
+ return rc;
+}
+
+int
+efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, uint64_t wwnn)
+{
+ struct efc_nport *nport;
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ /* walk the efc_vport_list and remove from there */
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+
+ if (!domain) {
+ /* No domain means no nport to look for */
+ return 0;
+ }
+
+ spin_lock_irqsave(&efc->lock, flags);
+ list_for_each_entry(nport, &domain->nport_list, list_entry) {
+ if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
+ kref_put(&nport->ref, nport->release);
+ /* Shutdown this NPORT */
+ efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&efc->lock, flags);
+ return 0;
+}
+
+void
+efc_vport_del_all(struct efc *efc)
+{
+ struct efc_vport *vport;
+ struct efc_vport *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
+ list_del(&vport->list_entry);
+ kfree(vport);
+ }
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+}
+
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn,
+ u32 fc_id, bool enable_ini,
+ bool enable_tgt, void *tgt_data, void *ini_data)
+{
+ struct efc_vport *vport;
+ unsigned long flags = 0;
+
+ /*
+ * Walk the efc_vport_list and return failure if a valid vport entry
+ * (one with a non-zero WWPN and WWNN) has already been created.
+ */
+ spin_lock_irqsave(&efc->vport_lock, flags);
+ list_for_each_entry(vport, &efc->vport_list, list_entry) {
+ if ((wwpn && vport->wwpn == wwpn) &&
+ (wwnn && vport->wwnn == wwnn)) {
+ efc_log_err(efc,
+ "VPORT %016llX %016llX already allocated\n",
+ wwnn, wwpn);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+ }
+
+ vport = kzalloc(sizeof(*vport), GFP_ATOMIC);
+ if (!vport) {
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return NULL;
+ }
+
+ vport->wwnn = wwnn;
+ vport->wwpn = wwpn;
+ vport->fc_id = fc_id;
+ vport->enable_tgt = enable_tgt;
+ vport->enable_ini = enable_ini;
+ vport->tgt_data = tgt_data;
+ vport->ini_data = ini_data;
+
+ INIT_LIST_HEAD(&vport->list_entry);
+ list_add_tail(&vport->list_entry, &efc->vport_list);
+ spin_unlock_irqrestore(&efc->vport_lock, flags);
+ return vport;
+}
diff --git a/drivers/scsi/elx/libefc/efc_nport.h b/drivers/scsi/elx/libefc/efc_nport.h
new file mode 100644
index 000000000000..b575ea205bbf
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_nport.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/**
+ * EFC FC port (NPORT) exported declarations
+ *
+ */
+
+#ifndef __EFC_NPORT_H__
+#define __EFC_NPORT_H__
+
+struct efc_nport *
+efc_nport_find(struct efc_domain *domain, u32 d_id);
+struct efc_nport *
+efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
+ u32 fc_id, bool enable_ini, bool enable_tgt);
+void
+efc_nport_free(struct efc_nport *nport);
+int
+efc_nport_attach(struct efc_nport *nport, u32 fc_id);
+
+void
+__efc_nport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_init(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+void
+__efc_nport_attached(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg);
+
+int
+efc_vport_start(struct efc_domain *domain);
+
+#endif /* __EFC_NPORT_H__ */
diff --git a/drivers/scsi/elx/libefc/efc_sm.c b/drivers/scsi/elx/libefc/efc_sm.c
new file mode 100644
index 000000000000..afd963782c1c
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * Generic state machine framework.
+ */
+#include "efc.h"
+#include "efc_sm.h"
+
+/**
+ * efc_sm_post_event() - Post an event to a context.
+ *
+ * @ctx: State machine context
+ * @evt: Event to post
+ * @data: Event-specific data (if any)
+ */
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data)
+{
+ if (!ctx->current_state)
+ return -EIO;
+
+ ctx->current_state(ctx, evt, data);
+ return 0;
+}
+
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *,
+ enum efc_sm_event, void *), void *data)
+
+{
+ if (ctx->current_state == state) {
+ efc_sm_post_event(ctx, EFC_EVT_REENTER, data);
+ } else {
+ efc_sm_post_event(ctx, EFC_EVT_EXIT, data);
+ ctx->current_state = state;
+ efc_sm_post_event(ctx, EFC_EVT_ENTER, data);
+ }
+}
+
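+/*
+ * Handler shape sketch (state names are placeholders): a state is a function
+ * of (ctx, event, arg) that switches on the event and may transition:
+ *
+ *   static void __example_state(struct efc_sm_ctx *ctx,
+ *                               enum efc_sm_event evt, void *arg)
+ *   {
+ *           switch (evt) {
+ *           case EFC_EVT_ENTER:
+ *                   break;
+ *           case EFC_EVT_SHUTDOWN:
+ *                   efc_sm_transition(ctx, __example_next_state, NULL);
+ *                   break;
+ *           default:
+ *                   break;
+ *           }
+ *   }
+ */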
+static char *event_name[] = EFC_SM_EVENT_NAME;
+
+const char *efc_sm_event_name(enum efc_sm_event evt)
+{
+ if (evt > EFC_EVT_LAST)
+ return "unknown";
+
+ return event_name[evt];
+}
diff --git a/drivers/scsi/elx/libefc/efc_sm.h b/drivers/scsi/elx/libefc/efc_sm.h
new file mode 100644
index 000000000000..e26867b4db24
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efc_sm.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ */
+
+/**
+ * Generic state machine framework declarations.
+ */
+
+#ifndef _EFC_SM_H
+#define _EFC_SM_H
+
+struct efc_sm_ctx;
+
+/* State Machine events */
+enum efc_sm_event {
+ /* Common Events */
+ EFC_EVT_ENTER,
+ EFC_EVT_REENTER,
+ EFC_EVT_EXIT,
+ EFC_EVT_SHUTDOWN,
+ EFC_EVT_ALL_CHILD_NODES_FREE,
+ EFC_EVT_RESUME,
+ EFC_EVT_TIMER_EXPIRED,
+
+ /* Domain Events */
+ EFC_EVT_RESPONSE,
+ EFC_EVT_ERROR,
+
+ EFC_EVT_DOMAIN_FOUND,
+ EFC_EVT_DOMAIN_ALLOC_OK,
+ EFC_EVT_DOMAIN_ALLOC_FAIL,
+ EFC_EVT_DOMAIN_REQ_ATTACH,
+ EFC_EVT_DOMAIN_ATTACH_OK,
+ EFC_EVT_DOMAIN_ATTACH_FAIL,
+ EFC_EVT_DOMAIN_LOST,
+ EFC_EVT_DOMAIN_FREE_OK,
+ EFC_EVT_DOMAIN_FREE_FAIL,
+ EFC_EVT_HW_DOMAIN_REQ_ATTACH,
+ EFC_EVT_HW_DOMAIN_REQ_FREE,
+
+ /* Sport Events */
+ EFC_EVT_NPORT_ALLOC_OK,
+ EFC_EVT_NPORT_ALLOC_FAIL,
+ EFC_EVT_NPORT_ATTACH_OK,
+ EFC_EVT_NPORT_ATTACH_FAIL,
+ EFC_EVT_NPORT_FREE_OK,
+ EFC_EVT_NPORT_FREE_FAIL,
+ EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
+ EFC_EVT_HW_PORT_ALLOC_OK,
+ EFC_EVT_HW_PORT_ALLOC_FAIL,
+ EFC_EVT_HW_PORT_ATTACH_OK,
+ EFC_EVT_HW_PORT_REQ_ATTACH,
+ EFC_EVT_HW_PORT_REQ_FREE,
+ EFC_EVT_HW_PORT_FREE_OK,
+
+ /* Login Events */
+ EFC_EVT_SRRS_ELS_REQ_OK,
+ EFC_EVT_SRRS_ELS_CMPL_OK,
+ EFC_EVT_SRRS_ELS_REQ_FAIL,
+ EFC_EVT_SRRS_ELS_CMPL_FAIL,
+ EFC_EVT_SRRS_ELS_REQ_RJT,
+ EFC_EVT_NODE_ATTACH_OK,
+ EFC_EVT_NODE_ATTACH_FAIL,
+ EFC_EVT_NODE_FREE_OK,
+ EFC_EVT_NODE_FREE_FAIL,
+ EFC_EVT_ELS_FRAME,
+ EFC_EVT_ELS_REQ_TIMEOUT,
+ EFC_EVT_ELS_REQ_ABORTED,
+ /* request an ELS IO be aborted */
+ EFC_EVT_ABORT_ELS,
+ /* ELS abort process complete */
+ EFC_EVT_ELS_ABORT_CMPL,
+
+ EFC_EVT_ABTS_RCVD,
+
+ /* node is not in the GID_PT payload */
+ EFC_EVT_NODE_MISSING,
+ /* node is allocated and in the GID_PT payload */
+ EFC_EVT_NODE_REFOUND,
+ /* node shutting down due to PLOGI recvd (implicit logo) */
+ EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
+ /* node shutting down due to LOGO recvd/sent (explicit logo) */
+ EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
+
+ EFC_EVT_PLOGI_RCVD,
+ EFC_EVT_FLOGI_RCVD,
+ EFC_EVT_LOGO_RCVD,
+ EFC_EVT_PRLI_RCVD,
+ EFC_EVT_PRLO_RCVD,
+ EFC_EVT_PDISC_RCVD,
+ EFC_EVT_FDISC_RCVD,
+ EFC_EVT_ADISC_RCVD,
+ EFC_EVT_RSCN_RCVD,
+ EFC_EVT_SCR_RCVD,
+ EFC_EVT_ELS_RCVD,
+
+ EFC_EVT_FCP_CMD_RCVD,
+
+ EFC_EVT_GIDPT_DELAY_EXPIRED,
+
+ /* SCSI Target Server events */
+ EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY,
+ EFC_EVT_NODE_DEL_INI_COMPLETE,
+ EFC_EVT_NODE_DEL_TGT_COMPLETE,
+ EFC_EVT_NODE_SESS_REG_OK,
+ EFC_EVT_NODE_SESS_REG_FAIL,
+
+ /* Must be last */
+ EFC_EVT_LAST
+};
+
+/* State Machine event name lookup array */
+#define EFC_SM_EVENT_NAME { \
+ [EFC_EVT_ENTER] = "EFC_EVT_ENTER", \
+ [EFC_EVT_REENTER] = "EFC_EVT_REENTER", \
+ [EFC_EVT_EXIT] = "EFC_EVT_EXIT", \
+ [EFC_EVT_SHUTDOWN] = "EFC_EVT_SHUTDOWN", \
+ [EFC_EVT_ALL_CHILD_NODES_FREE] = "EFC_EVT_ALL_CHILD_NODES_FREE",\
+ [EFC_EVT_RESUME] = "EFC_EVT_RESUME", \
+ [EFC_EVT_TIMER_EXPIRED] = "EFC_EVT_TIMER_EXPIRED", \
+ [EFC_EVT_RESPONSE] = "EFC_EVT_RESPONSE", \
+ [EFC_EVT_ERROR] = "EFC_EVT_ERROR", \
+ [EFC_EVT_DOMAIN_FOUND] = "EFC_EVT_DOMAIN_FOUND", \
+ [EFC_EVT_DOMAIN_ALLOC_OK] = "EFC_EVT_DOMAIN_ALLOC_OK", \
+ [EFC_EVT_DOMAIN_ALLOC_FAIL] = "EFC_EVT_DOMAIN_ALLOC_FAIL", \
+ [EFC_EVT_DOMAIN_REQ_ATTACH] = "EFC_EVT_DOMAIN_REQ_ATTACH", \
+ [EFC_EVT_DOMAIN_ATTACH_OK] = "EFC_EVT_DOMAIN_ATTACH_OK", \
+ [EFC_EVT_DOMAIN_ATTACH_FAIL] = "EFC_EVT_DOMAIN_ATTACH_FAIL", \
+ [EFC_EVT_DOMAIN_LOST] = "EFC_EVT_DOMAIN_LOST", \
+ [EFC_EVT_DOMAIN_FREE_OK] = "EFC_EVT_DOMAIN_FREE_OK", \
+ [EFC_EVT_DOMAIN_FREE_FAIL] = "EFC_EVT_DOMAIN_FREE_FAIL", \
+ [EFC_EVT_HW_DOMAIN_REQ_ATTACH] = "EFC_EVT_HW_DOMAIN_REQ_ATTACH",\
+ [EFC_EVT_HW_DOMAIN_REQ_FREE] = "EFC_EVT_HW_DOMAIN_REQ_FREE", \
+ [EFC_EVT_NPORT_ALLOC_OK] = "EFC_EVT_NPORT_ALLOC_OK", \
+ [EFC_EVT_NPORT_ALLOC_FAIL] = "EFC_EVT_NPORT_ALLOC_FAIL", \
+ [EFC_EVT_NPORT_ATTACH_OK] = "EFC_EVT_NPORT_ATTACH_OK", \
+ [EFC_EVT_NPORT_ATTACH_FAIL] = "EFC_EVT_NPORT_ATTACH_FAIL", \
+ [EFC_EVT_NPORT_FREE_OK] = "EFC_EVT_NPORT_FREE_OK", \
+ [EFC_EVT_NPORT_FREE_FAIL] = "EFC_EVT_NPORT_FREE_FAIL", \
+ [EFC_EVT_NPORT_TOPOLOGY_NOTIFY] = "EFC_EVT_NPORT_TOPOLOGY_NOTIFY",\
+ [EFC_EVT_HW_PORT_ALLOC_OK] = "EFC_EVT_HW_PORT_ALLOC_OK", \
+ [EFC_EVT_HW_PORT_ALLOC_FAIL] = "EFC_EVT_HW_PORT_ALLOC_FAIL", \
+ [EFC_EVT_HW_PORT_ATTACH_OK] = "EFC_EVT_HW_PORT_ATTACH_OK", \
+ [EFC_EVT_HW_PORT_REQ_ATTACH] = "EFC_EVT_HW_PORT_REQ_ATTACH", \
+ [EFC_EVT_HW_PORT_REQ_FREE] = "EFC_EVT_HW_PORT_REQ_FREE", \
+ [EFC_EVT_HW_PORT_FREE_OK] = "EFC_EVT_HW_PORT_FREE_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_OK] = "EFC_EVT_SRRS_ELS_REQ_OK", \
+ [EFC_EVT_SRRS_ELS_CMPL_OK] = "EFC_EVT_SRRS_ELS_CMPL_OK", \
+ [EFC_EVT_SRRS_ELS_REQ_FAIL] = "EFC_EVT_SRRS_ELS_REQ_FAIL", \
+ [EFC_EVT_SRRS_ELS_CMPL_FAIL] = "EFC_EVT_SRRS_ELS_CMPL_FAIL", \
+ [EFC_EVT_SRRS_ELS_REQ_RJT] = "EFC_EVT_SRRS_ELS_REQ_RJT", \
+ [EFC_EVT_NODE_ATTACH_OK] = "EFC_EVT_NODE_ATTACH_OK", \
+ [EFC_EVT_NODE_ATTACH_FAIL] = "EFC_EVT_NODE_ATTACH_FAIL", \
+ [EFC_EVT_NODE_FREE_OK] = "EFC_EVT_NODE_FREE_OK", \
+ [EFC_EVT_NODE_FREE_FAIL] = "EFC_EVT_NODE_FREE_FAIL", \
+ [EFC_EVT_ELS_FRAME] = "EFC_EVT_ELS_FRAME", \
+ [EFC_EVT_ELS_REQ_TIMEOUT] = "EFC_EVT_ELS_REQ_TIMEOUT", \
+ [EFC_EVT_ELS_REQ_ABORTED] = "EFC_EVT_ELS_REQ_ABORTED", \
+ [EFC_EVT_ABORT_ELS] = "EFC_EVT_ABORT_ELS", \
+ [EFC_EVT_ELS_ABORT_CMPL] = "EFC_EVT_ELS_ABORT_CMPL", \
+ [EFC_EVT_ABTS_RCVD] = "EFC_EVT_ABTS_RCVD", \
+ [EFC_EVT_NODE_MISSING] = "EFC_EVT_NODE_MISSING", \
+ [EFC_EVT_NODE_REFOUND] = "EFC_EVT_NODE_REFOUND", \
+ [EFC_EVT_SHUTDOWN_IMPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_IMPLICIT_LOGO",\
+ [EFC_EVT_SHUTDOWN_EXPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_EXPLICIT_LOGO",\
+ [EFC_EVT_PLOGI_RCVD] = "EFC_EVT_PLOGI_RCVD", \
+ [EFC_EVT_FLOGI_RCVD] = "EFC_EVT_FLOGI_RCVD", \
+ [EFC_EVT_LOGO_RCVD] = "EFC_EVT_LOGO_RCVD", \
+ [EFC_EVT_PRLI_RCVD] = "EFC_EVT_PRLI_RCVD", \
+ [EFC_EVT_PRLO_RCVD] = "EFC_EVT_PRLO_RCVD", \
+ [EFC_EVT_PDISC_RCVD] = "EFC_EVT_PDISC_RCVD", \
+ [EFC_EVT_FDISC_RCVD] = "EFC_EVT_FDISC_RCVD", \
+ [EFC_EVT_ADISC_RCVD] = "EFC_EVT_ADISC_RCVD", \
+ [EFC_EVT_RSCN_RCVD] = "EFC_EVT_RSCN_RCVD", \
+ [EFC_EVT_SCR_RCVD] = "EFC_EVT_SCR_RCVD", \
+ [EFC_EVT_ELS_RCVD] = "EFC_EVT_ELS_RCVD", \
+ [EFC_EVT_FCP_CMD_RCVD] = "EFC_EVT_FCP_CMD_RCVD", \
+ [EFC_EVT_NODE_DEL_INI_COMPLETE] = "EFC_EVT_NODE_DEL_INI_COMPLETE",\
+ [EFC_EVT_NODE_DEL_TGT_COMPLETE] = "EFC_EVT_NODE_DEL_TGT_COMPLETE",\
+ [EFC_EVT_LAST] = "EFC_EVT_LAST", \
+}
+
+int
+efc_sm_post_event(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *data);
+void
+efc_sm_transition(struct efc_sm_ctx *ctx,
+ void (*state)(struct efc_sm_ctx *ctx,
+ enum efc_sm_event evt, void *arg),
+ void *data);
+void efc_sm_disable(struct efc_sm_ctx *ctx);
+const char *efc_sm_event_name(enum efc_sm_event evt);
+
+#endif /* ! _EFC_SM_H */
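The EFC_SM_EVENT_NAME table above is presumably what backs efc_sm_event_name(); a minimal sketch of such a consumer, assuming a local efc_sm_event_names[] array and a fallback string (both names are illustrative, not taken from this patch):

static const char *efc_sm_event_names[] = EFC_SM_EVENT_NAME;

const char *efc_sm_event_name(enum efc_sm_event evt)
{
	/* Not every event has an entry in the table, so guard the array
	 * bound first and then a possible NULL slot.
	 */
	if (evt > EFC_EVT_LAST || !efc_sm_event_names[evt])
		return "unknown";

	return efc_sm_event_names[evt];
}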
diff --git a/drivers/scsi/elx/libefc/efclib.c b/drivers/scsi/elx/libefc/efclib.c
new file mode 100644
index 000000000000..dd3e3d0a4761
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/*
+ * LIBEFC LOCKING
+ *
+ * The critical sections protected by the efc's spinlock are quite broad and
+ * may be improved upon in the future. The libefc code and its locking don't
+ * influence the I/O path, so excessive locking doesn't impact I/O performance.
+ *
+ * The strategy is to lock whenever a request from the user driver is being
+ * processed. This means that the entry points into the libefc library are
+ * protected by the efc lock, so all state machine transitions are protected.
+ */
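A rough sketch of that pattern, assuming a hypothetical entry point (the function below only illustrates the locking convention and is not part of the library):

static void example_libefc_entry_point(struct efc *efc, struct efc_node *node,
				       enum efc_sm_event evt, void *arg)
{
	unsigned long flags;

	/* Take the broad efc lock on entry so that every state machine
	 * transition triggered below runs under it.
	 */
	spin_lock_irqsave(&efc->lock, flags);
	efc_sm_post_event(&node->sm, evt, arg);
	spin_unlock_irqrestore(&efc->lock, flags);
}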
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "efc.h"
+
+int efcport_init(struct efc *efc)
+{
+ u32 rc = 0;
+
+ spin_lock_init(&efc->lock);
+ INIT_LIST_HEAD(&efc->vport_list);
+ efc->hold_frames = false;
+ spin_lock_init(&efc->pend_frames_lock);
+ INIT_LIST_HEAD(&efc->pend_frames);
+
+ /* Create Node pool */
+ efc->node_pool = mempool_create_kmalloc_pool(EFC_MAX_REMOTE_NODES,
+ sizeof(struct efc_node));
+ if (!efc->node_pool) {
+ efc_log_err(efc, "Can't allocate node pool\n");
+ return -ENOMEM;
+ }
+
+ efc->node_dma_pool = dma_pool_create("node_dma_pool", &efc->pci->dev,
+ NODE_SPARAMS_SIZE, 0, 0);
+ if (!efc->node_dma_pool) {
+ efc_log_err(efc, "Can't allocate node dma pool\n");
+ mempool_destroy(efc->node_pool);
+ return -ENOMEM;
+ }
+
+ efc->els_io_pool = mempool_create_kmalloc_pool(EFC_ELS_IO_POOL_SZ,
+ sizeof(struct efc_els_io_req));
+	if (!efc->els_io_pool) {
+		efc_log_err(efc, "Can't allocate els io pool\n");
+		dma_pool_destroy(efc->node_dma_pool);
+		mempool_destroy(efc->node_pool);
+		return -ENOMEM;
+	}
+
+ return rc;
+}
+
+static void
+efc_purge_pending(struct efc *efc)
+{
+ struct efc_hw_sequence *frame, *next;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&efc->pend_frames_lock, flags);
+
+ list_for_each_entry_safe(frame, next, &efc->pend_frames, list_entry) {
+ list_del(&frame->list_entry);
+ efc->tt.hw_seq_free(efc, frame);
+ }
+
+ spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
+}
+
+void efcport_destroy(struct efc *efc)
+{
+ efc_purge_pending(efc);
+ mempool_destroy(efc->els_io_pool);
+ mempool_destroy(efc->node_pool);
+ dma_pool_destroy(efc->node_dma_pool);
+}
diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h
new file mode 100644
index 000000000000..ee291cabf7e0
--- /dev/null
+++ b/drivers/scsi/elx/libefc/efclib.h
@@ -0,0 +1,620 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+#ifndef __EFCLIB_H__
+#define __EFCLIB_H__
+
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "scsi/fc/fc_ns.h"
+#include "scsi/fc/fc_gs.h"
+#include "scsi/fc_frame.h"
+#include "../include/efc_common.h"
+#include "../libefc_sli/sli4.h"
+
+#define EFC_SERVICE_PARMS_LENGTH 120
+#define EFC_NAME_LENGTH 32
+#define EFC_SM_NAME_LENGTH 64
+#define EFC_DISPLAY_BUS_INFO_LENGTH 16
+
+#define EFC_WWN_LENGTH 32
+
+#define EFC_FC_ELS_DEFAULT_RETRIES 3
+
+/* Timeouts */
+#define EFC_FC_ELS_SEND_DEFAULT_TIMEOUT 0
+#define EFC_FC_FLOGI_TIMEOUT_SEC 5
+#define EFC_SHUTDOWN_TIMEOUT_USEC 30000000
+
+/* Return values for calls from base driver to libefc */
+#define EFC_SCSI_CALL_COMPLETE 0
+#define EFC_SCSI_CALL_ASYNC 1
+
+/* Local port topology */
+enum efc_nport_topology {
+ EFC_NPORT_TOPO_UNKNOWN = 0,
+ EFC_NPORT_TOPO_FABRIC,
+ EFC_NPORT_TOPO_P2P,
+ EFC_NPORT_TOPO_FC_AL,
+};
+
+#define enable_target_rscn(efc) 1
+
+enum efc_node_shutd_rsn {
+ EFC_NODE_SHUTDOWN_DEFAULT = 0,
+ EFC_NODE_SHUTDOWN_EXPLICIT_LOGO,
+ EFC_NODE_SHUTDOWN_IMPLICIT_LOGO,
+};
+
+enum efc_node_send_ls_acc {
+ EFC_NODE_SEND_LS_ACC_NONE = 0,
+ EFC_NODE_SEND_LS_ACC_PLOGI,
+ EFC_NODE_SEND_LS_ACC_PRLI,
+};
+
+#define EFC_LINK_STATUS_UP 0
+#define EFC_LINK_STATUS_DOWN 1
+
+/* State machine context header */
+struct efc_sm_ctx {
+ void (*current_state)(struct efc_sm_ctx *ctx,
+ u32 evt, void *arg);
+
+ const char *description;
+ void *app;
+};
+
+/* Description of discovered Fabric Domain */
+struct efc_domain_record {
+ u32 index;
+ u32 priority;
+ u8 address[6];
+ u8 wwn[8];
+ union {
+ u8 vlan[512];
+ u8 loop[128];
+ } map;
+ u32 speed;
+ u32 fc_id;
+ bool is_loop;
+ bool is_nport;
+};
+
+/* Domain events */
+enum efc_hw_domain_event {
+ EFC_HW_DOMAIN_ALLOC_OK,
+ EFC_HW_DOMAIN_ALLOC_FAIL,
+ EFC_HW_DOMAIN_ATTACH_OK,
+ EFC_HW_DOMAIN_ATTACH_FAIL,
+ EFC_HW_DOMAIN_FREE_OK,
+ EFC_HW_DOMAIN_FREE_FAIL,
+ EFC_HW_DOMAIN_LOST,
+ EFC_HW_DOMAIN_FOUND,
+ EFC_HW_DOMAIN_CHANGED,
+};
+
+/**
+ * Fibre Channel port object
+ *
+ * @list_entry: nport list entry
+ * @ref: reference count, each node takes a reference
+ * @release: function to free nport object
+ * @efc: pointer back to efc
+ * @instance_index: unique instance index value
+ * @display_name: port display name
+ * @is_vport: Is NPIV port
+ * @free_req_pending: pending request to free resources
+ * @attached: mark attached if reg VPI succeeds
+ * @p2p_winner: TRUE if we're the point-to-point winner
+ * @domain: pointer back to domain
+ * @wwpn: port wwpn
+ * @wwnn: port wwnn
+ * @tgt_data: target backend private port data
+ * @ini_data: initiator backend private port data
+ * @indicator: VPI
+ * @fc_id: port FC address
+ * @dma: memory for Service Parameters
+ * @wwnn_str: wwpn string
+ * @sli_wwpn: SLI provided wwpn
+ * @sli_wwnn: SLI provided wwnn
+ * @sm: nport state machine context
+ * @lookup: fc_id to node lookup object
+ * @enable_ini: SCSI initiator enabled for this port
+ * @enable_tgt: SCSI target enabled for this port
+ * @enable_rscn: port will be expecting RSCN
+ * @shutting_down: nport in process of shutting down
+ * @p2p_port_id: our port id for point-to-point
+ * @topology: port topology (fabric/p2p/unknown)
+ * @service_params: login parameters
+ * @p2p_remote_port_id: remote node's port id for point-to-point
+ */
+
+struct efc_nport {
+ struct list_head list_entry;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ struct efc *efc;
+ u32 instance_index;
+ char display_name[EFC_NAME_LENGTH];
+ bool is_vport;
+ bool free_req_pending;
+ bool attached;
+ bool p2p_winner;
+ struct efc_domain *domain;
+ u64 wwpn;
+ u64 wwnn;
+ void *tgt_data;
+ void *ini_data;
+
+ u32 indicator;
+ u32 fc_id;
+ struct efc_dma dma;
+
+ u8 wwnn_str[EFC_WWN_LENGTH];
+ __be64 sli_wwpn;
+ __be64 sli_wwnn;
+
+ struct efc_sm_ctx sm;
+ struct xarray lookup;
+ bool enable_ini;
+ bool enable_tgt;
+ bool enable_rscn;
+ bool shutting_down;
+ u32 p2p_port_id;
+ enum efc_nport_topology topology;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u32 p2p_remote_port_id;
+};
+
+/**
+ * Fibre Channel domain object
+ *
+ * This object is a container for the various SLI components needed
+ * to connect to the domain of a FC or FCoE switch
+ * @efc: pointer back to efc
+ * @instance_index: unique instance index value
+ * @display_name: Domain display name
+ * @nport_list: linked list of nports associated with this domain
+ * @ref: Reference count, each nport takes a reference
+ * @release: Function to free domain object
+ * @ini_domain: initiator backend private domain data
+ * @tgt_domain: target backend private domain data
+ * @sm: state machine context
+ * @fcf: FC Forwarder table index
+ * @fcf_indicator: FCFI
+ * @indicator: VFI
+ * @nport_count: Number of nports allocated
+ * @dma: memory for Service Parameters
+ * @fcf_wwn: WWN for FCF/switch
+ * @drvsm: driver domain sm context
+ * @attached: set true after attach completes
+ * @is_fc: is FC
+ * @is_loop: is loop topology
+ * @is_nlport: is public loop
+ * @domain_found_pending: a domain-found event is pending, drec is updated
+ * @req_domain_free: True if domain object should be free'd
+ * @req_accept_frames: set in domain state machine to enable frames
+ * @domain_notify_pend: Set in domain SM to avoid duplicate node event post
+ * @pending_drec: Pending drec if a domain found is pending
+ * @service_params: any nports service parameters
+ * @flogi_service_params: Fabric/P2P service parameters from FLOGI
+ * @lookup: d_id to node lookup object
+ * @nport: Pointer to first (physical) SLI port
+ */
+struct efc_domain {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct list_head nport_list;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ void *ini_domain;
+ void *tgt_domain;
+
+ /* Declarations private to HW/SLI */
+ u32 fcf;
+ u32 fcf_indicator;
+ u32 indicator;
+ u32 nport_count;
+ struct efc_dma dma;
+
+	/* Declarations private to FC transport */
+ u64 fcf_wwn;
+ struct efc_sm_ctx drvsm;
+ bool attached;
+ bool is_fc;
+ bool is_loop;
+ bool is_nlport;
+ bool domain_found_pending;
+ bool req_domain_free;
+ bool req_accept_frames;
+ bool domain_notify_pend;
+
+ struct efc_domain_record pending_drec;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ u8 flogi_service_params[EFC_SERVICE_PARMS_LENGTH];
+
+ struct xarray lookup;
+
+ struct efc_nport *nport;
+};
+
+/**
+ * Remote Node object
+ *
+ * This object represents a connection between the SLI port and another
+ * Nx_Port on the fabric. Note this can be either a well-known port such
+ * as an F_Port (i.e. ff:ff:fe) or another N_Port.
+ * @indicator: RPI
+ * @fc_id: FC address
+ * @attached: true if attached
+ * @nport: associated SLI port
+ * @node: associated node
+ */
+struct efc_remote_node {
+ u32 indicator;
+ u32 index;
+ u32 fc_id;
+
+ bool attached;
+
+ struct efc_nport *nport;
+ void *node;
+};
+
+/**
+ * FC Node object
+ * @efc: pointer back to efc structure
+ * @display_name: Node display name
+ * @nport: Associated nport pointer.
+ * @hold_frames: hold incoming frames if true
+ * @els_io_enabled: Enable allocating els ios for this node
+ * @els_ios_lock: lock to protect the els ios list
+ * @els_ios_list: ELS I/O's for this node
+ * @ini_node: backend initiator private node data
+ * @tgt_node: backend target private node data
+ * @rnode: Remote node
+ * @sm: state machine context
+ * @evtdepth: current event posting nesting depth
+ * @req_free: this node is to be free'd
+ * @attached: node is attached (REGLOGIN complete)
+ * @fcp_enabled: node is enabled to handle FCP
+ * @rscn_pending: for name server node RSCN is pending
+ * @send_plogi: send PLOGI accept, upon completion of node attach
+ * @send_plogi_acc: TRUE if io_alloc() is enabled.
+ * @send_ls_acc: type of LS acc to send
+ * @ls_acc_io: SCSI IO for LS acc
+ * @ls_acc_oxid: OX_ID for pending accept
+ * @ls_acc_did: D_ID for pending accept
+ * @shutdown_reason: reason for node shutdown
+ * @sparm_dma_buf: service parameters buffer
+ * @service_params: plogi/acc frame from remote device
+ * @pend_frames_lock: lock for inbound pending frames list
+ * @pend_frames: inbound pending frames list
+ * @pend_frames_processed: count of frames processed in hold frames interval
+ * @ox_id_in_use: used to verify one-at-a-time use of ox_id
+ * @els_retries_remaining:for ELS, number of retries remaining
+ * @els_req_cnt: number of outstanding ELS requests
+ * @els_cmpl_cnt: number of outstanding ELS completions
+ * @abort_cnt: Abort counter for debugging purposes
+ * @current_state_name: current node state
+ * @prev_state_name: previous node state
+ * @current_evt: current event
+ * @prev_evt: previous event
+ * @targ: node is target capable
+ * @init: node is init capable
+ * @refound: Handle node refound case when node is being deleted
+ * @els_io_pend_list: list of pending (not yet processed) ELS IOs
+ * @els_io_active_list: list of active (processed) ELS IOs
+ * @nodedb_state: Node debugging, saved state
+ * @gidpt_delay_timer: GIDPT delay timer
+ * @time_last_gidpt_msec: Start time of last target RSCN GIDPT
+ * @wwnn: remote port WWNN
+ * @wwpn: remote port WWPN
+ */
+struct efc_node {
+ struct efc *efc;
+ char display_name[EFC_NAME_LENGTH];
+ struct efc_nport *nport;
+ struct kref ref;
+ void (*release)(struct kref *arg);
+ bool hold_frames;
+ bool els_io_enabled;
+ bool send_plogi_acc;
+ bool send_plogi;
+ bool rscn_pending;
+ bool fcp_enabled;
+ bool attached;
+ bool req_free;
+
+ spinlock_t els_ios_lock;
+ struct list_head els_ios_list;
+ void *ini_node;
+ void *tgt_node;
+
+ struct efc_remote_node rnode;
+	/* Declarations private to FC transport */
+ struct efc_sm_ctx sm;
+ u32 evtdepth;
+
+ enum efc_node_send_ls_acc send_ls_acc;
+ void *ls_acc_io;
+ u32 ls_acc_oxid;
+ u32 ls_acc_did;
+ enum efc_node_shutd_rsn shutdown_reason;
+ bool targ;
+ bool init;
+ bool refound;
+ struct efc_dma sparm_dma_buf;
+ u8 service_params[EFC_SERVICE_PARMS_LENGTH];
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ u32 pend_frames_processed;
+ u32 ox_id_in_use;
+ u32 els_retries_remaining;
+ u32 els_req_cnt;
+ u32 els_cmpl_cnt;
+ u32 abort_cnt;
+
+ char current_state_name[EFC_SM_NAME_LENGTH];
+ char prev_state_name[EFC_SM_NAME_LENGTH];
+ int current_evt;
+ int prev_evt;
+
+ void (*nodedb_state)(struct efc_sm_ctx *ctx,
+ u32 evt, void *arg);
+ struct timer_list gidpt_delay_timer;
+ u64 time_last_gidpt_msec;
+
+ char wwnn[EFC_WWN_LENGTH];
+ char wwpn[EFC_WWN_LENGTH];
+};
+
+/**
+ * NPIV port
+ *
+ * Collection of the information required to restore a virtual port across
+ * link events
+ * @wwnn: node name
+ * @wwpn: port name
+ * @fc_id: port id
+ * @tgt_data: target backend pointer
+ * @ini_data: initiator backend pointer
+ * @nport: Used to match record after attaching for update
+ *
+ */
+
+struct efc_vport {
+ struct list_head list_entry;
+ u64 wwnn;
+ u64 wwpn;
+ u32 fc_id;
+ bool enable_tgt;
+ bool enable_ini;
+ void *tgt_data;
+ void *ini_data;
+ struct efc_nport *nport;
+};
+
+#define node_printf(node, fmt, args...) \
+ efc_log_info(node->efc, "[%s] " fmt, node->display_name, ##args)
+
+/* Node SM IO Context Callback structure */
+struct efc_node_cb {
+ int status;
+ int ext_status;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ struct efc_dma els_rsp;
+
+ /* Actual length of data received */
+ int rsp_len;
+};
+
+struct efc_hw_rq_buffer {
+ u16 rqindex;
+ struct efc_dma dma;
+};
+
+/**
+ * FC sequence object
+ *
+ * Defines a general FC sequence object
+ * @hw: HW that owns this sequence
+ * @fcfi: FCFI associated with sequence
+ * @header: Received frame header
+ * @payload: Received frame payload
+ * @hw_priv: HW private context
+ */
+struct efc_hw_sequence {
+ struct list_head list_entry;
+ void *hw;
+ u8 fcfi;
+ struct efc_hw_rq_buffer *header;
+ struct efc_hw_rq_buffer *payload;
+ void *hw_priv;
+};
+
+enum efc_disc_io_type {
+ EFC_DISC_IO_ELS_REQ,
+ EFC_DISC_IO_ELS_RESP,
+ EFC_DISC_IO_CT_REQ,
+ EFC_DISC_IO_CT_RESP
+};
+
+struct efc_io_els_params {
+ u32 s_id;
+ u16 ox_id;
+ u8 timeout;
+};
+
+struct efc_io_ct_params {
+ u8 r_ctl;
+ u8 type;
+ u8 df_ctl;
+ u8 timeout;
+ u16 ox_id;
+};
+
+union efc_disc_io_param {
+ struct efc_io_els_params els;
+ struct efc_io_ct_params ct;
+};
+
+struct efc_disc_io {
+ struct efc_dma req; /* send buffer */
+ struct efc_dma rsp; /* receive buffer */
+	enum efc_disc_io_type io_type;	/* EFC_DISC_IO_TYPE enum */
+	u16 xmit_len;			/* Length of the ELS request */
+	u16 rsp_len;			/* Max length of response to be received */
+ u32 rpi; /* Registered RPI */
+ u32 vpi; /* VPI for this nport */
+ u32 s_id;
+ u32 d_id;
+ bool rpi_registered; /* if false, use tmp RPI */
+ union efc_disc_io_param iparam;
+};
+
+/* Return value indicating the sequence cannot be freed */
+#define EFC_HW_SEQ_HOLD 0
+/* Return value indicating the sequence can be freed */
+#define EFC_HW_SEQ_FREE 1
+
+struct libefc_function_template {
+	/* Nport */
+ int (*new_nport)(struct efc *efc, struct efc_nport *sp);
+ void (*del_nport)(struct efc *efc, struct efc_nport *sp);
+
+	/* SCSI node */
+ int (*scsi_new_node)(struct efc *efc, struct efc_node *n);
+ int (*scsi_del_node)(struct efc *efc, struct efc_node *n, int reason);
+
+ int (*issue_mbox_rqst)(void *efct, void *buf, void *cb, void *arg);
+	/* Send ELS IO */
+ int (*send_els)(struct efc *efc, struct efc_disc_io *io);
+	/* Send BLS IO */
+ int (*send_bls)(struct efc *efc, u32 type, struct sli_bls_params *bls);
+	/* Free HW frame */
+ int (*hw_seq_free)(struct efc *efc, struct efc_hw_sequence *seq);
+};
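A base driver would typically fill this template and store it in efc->tt before calling efcport_init(); a minimal sketch under that assumption (my_new_nport, my_libefc_templ and my_driver_attach are illustrative names, not part of the patch):

static int my_new_nport(struct efc *efc, struct efc_nport *sp)
{
	/* backend bookkeeping for a newly allocated nport */
	return 0;
}

static struct libefc_function_template my_libefc_templ = {
	.new_nport	= my_new_nport,
	/* remaining callbacks filled in the same way */
};

static int my_driver_attach(struct efc *efc)
{
	efc->tt = my_libefc_templ;	/* register backend callbacks */
	return efcport_init(efc);	/* then initialize the libefc port */
}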
+
+#define EFC_LOG_LIB 0x01
+#define EFC_LOG_NODE 0x02
+#define EFC_LOG_PORT 0x04
+#define EFC_LOG_DOMAIN 0x08
+#define EFC_LOG_ELS 0x10
+#define EFC_LOG_DOMAIN_SM 0x20
+#define EFC_LOG_SM 0x40
+
+/* efc library port structure */
+struct efc {
+ void *base;
+ struct pci_dev *pci;
+ struct sli4 *sli;
+ u32 fcfi;
+ u64 req_wwpn;
+ u64 req_wwnn;
+
+ u64 def_wwpn;
+ u64 def_wwnn;
+ u64 max_xfer_size;
+ mempool_t *node_pool;
+ struct dma_pool *node_dma_pool;
+ u32 nodes_count;
+
+ u32 link_status;
+
+ struct list_head vport_list;
+ /* lock to protect the vport list */
+ spinlock_t vport_lock;
+
+ struct libefc_function_template tt;
+ /* lock to protect the discovery library.
+ * Refer to efclib.c for more details.
+ */
+ spinlock_t lock;
+
+ bool enable_ini;
+ bool enable_tgt;
+
+ u32 log_level;
+
+ struct efc_domain *domain;
+ void (*domain_free_cb)(struct efc *efc, void *arg);
+ void *domain_free_cb_arg;
+
+ u64 tgt_rscn_delay_msec;
+ u64 tgt_rscn_period_msec;
+
+ bool external_loopback;
+ u32 nodedb_mask;
+ u32 logmask;
+ mempool_t *els_io_pool;
+ atomic_t els_io_alloc_failed_count;
+
+ /* hold pending frames */
+ bool hold_frames;
+ /* lock to protect pending frames list access */
+ spinlock_t pend_frames_lock;
+ struct list_head pend_frames;
+ /* count of pending frames that were processed */
+ u32 pend_frames_processed;
+
+};
+
+/*
+ * EFC library registration
+ * **********************************/
+int efcport_init(struct efc *efc);
+void efcport_destroy(struct efc *efc);
+/*
+ * EFC Domain
+ * **********************************/
+int efc_domain_cb(void *arg, int event, void *data);
+void
+efc_register_domain_free_cb(struct efc *efc,
+ void (*callback)(struct efc *efc, void *arg),
+ void *arg);
+
+/*
+ * EFC nport
+ * **********************************/
+void efc_nport_cb(void *arg, int event, void *data);
+struct efc_vport *
+efc_vport_create_spec(struct efc *efc, u64 wwnn, u64 wwpn, u32 fc_id,
+ bool enable_ini, bool enable_tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_new(struct efc_domain *domain, u64 wwpn,
+ u64 wwnn, u32 fc_id, bool ini, bool tgt,
+ void *tgt_data, void *ini_data);
+int efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
+ u64 wwpn, u64 wwnn);
+
+void efc_vport_del_all(struct efc *efc);
+
+/*
+ * EFC Node
+ * **********************************/
+int efc_remote_node_cb(void *arg, int event, void *data);
+void efc_node_fcid_display(u32 fc_id, char *buffer, u32 buf_len);
+void efc_node_post_shutdown(struct efc_node *node, void *arg);
+u64 efc_node_get_wwpn(struct efc_node *node);
+
+/*
+ * EFC FCP/ELS/CT interface
+ * **********************************/
+void efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq);
+void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
+ u32 ext_status);
+
+/*
+ * EFC SCSI INTERACTION LAYER
+ * **********************************/
+void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status);
+void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node);
+void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node);
+
+#endif /* __EFCLIB_H__ */
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
new file mode 100644
index 000000000000..fc24a50c5d6b
--- /dev/null
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -0,0 +1,5162 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ */
+
+/**
+ * All common (i.e. transport-independent) SLI-4 functions are implemented
+ * in this file.
+ */
+#include "sli4.h"
+
+static struct sli4_asic_entry_t sli4_asic_table[] = {
+ { SLI4_ASIC_REV_B0, SLI4_ASIC_GEN_5},
+ { SLI4_ASIC_REV_D0, SLI4_ASIC_GEN_5},
+ { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A3, SLI4_ASIC_GEN_6},
+ { SLI4_ASIC_REV_A1, SLI4_ASIC_GEN_7},
+ { SLI4_ASIC_REV_A0, SLI4_ASIC_GEN_7},
+};
+
+/* Convert queue type enum (SLI_QTYPE_*) into a string */
+static char *SLI4_QNAME[] = {
+ "Event Queue",
+ "Completion Queue",
+ "Mailbox Queue",
+ "Work Queue",
+ "Receive Queue",
+ "Undefined"
+};
+
+/**
+ * sli_config_cmd_init() - Write a SLI_CONFIG command to the provided buffer.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @length: Length in bytes of attached command.
+ * @dma: DMA buffer for non-embedded commands.
+ * Return: Command payload buffer.
+ */
+static void *
+sli_config_cmd_init(struct sli4 *sli4, void *buf, u32 length,
+ struct efc_dma *dma)
+{
+ struct sli4_cmd_sli_config *config;
+ u32 flags;
+
+ if (length > sizeof(config->payload.embed) && !dma) {
+ efc_log_err(sli4, "Too big for an embedded cmd with len(%d)\n",
+ length);
+ return NULL;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ config = buf;
+
+ config->hdr.command = SLI4_MBX_CMD_SLI_CONFIG;
+ if (!dma) {
+ flags = SLI4_SLICONF_EMB;
+ config->dw1_flags = cpu_to_le32(flags);
+ config->payload_len = cpu_to_le32(length);
+ return config->payload.embed;
+ }
+
+ flags = SLI4_SLICONF_PMDCMD_VAL_1;
+ flags &= ~SLI4_SLICONF_EMB;
+ config->dw1_flags = cpu_to_le32(flags);
+
+ config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
+ config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys));
+ config->payload.mem.length =
+ cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
+ config->payload_len = cpu_to_le32(dma->size);
+ /* save pointer to DMA for BMBX dumping purposes */
+ sli4->bmbx_non_emb_pmd = dma;
+ return dma->virt;
+}
+
+/**
+ * sli_cmd_common_create_cq() - Write a COMMON_CREATE_CQ V2 command.
+ *
+ * @sli4: SLI context pointer.
+ * @buf: Destination buffer for the command.
+ * @qmem: DMA memory for queue.
+ * @eq_id: EQ id associated with this cq.
+ * Return: 0 on success, or -EIO on error.
+ */
+static int
+sli_cmd_common_create_cq(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 eq_id)
+{
+ struct sli4_rqst_cmn_create_cq_v2 *cqv2 = NULL;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages = 0;
+ size_t cmd_size = 0;
+ u32 page_size = 0;
+ u32 n_cqe = 0;
+ u32 dw5_flags = 0;
+ u16 dw6w1_arm = 0;
+ __le32 len;
+
+ /* First calculate number of pages and the mailbox cmd length */
+ n_cqe = qmem->size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = SZ_4K;
+ break;
+ case 4096:
+ page_size = SZ_8K;
+ break;
+ default:
+ return -EIO;
+ }
+ num_pages = sli_page_count(qmem->size, page_size);
+
+ cmd_size = SLI4_RQST_CMDSZ(cmn_create_cq_v2)
+ + SZ_DMAADDR * num_pages;
+
+ cqv2 = sli_config_cmd_init(sli4, buf, cmd_size, NULL);
+ if (!cqv2)
+ return -EIO;
+
+ len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, SZ_DMAADDR * num_pages);
+ sli_cmd_fill_hdr(&cqv2->hdr, SLI4_CMN_CREATE_CQ, SLI4_SUBSYSTEM_COMMON,
+ CMD_V2, len);
+ cqv2->page_size = page_size / SLI_PAGE_SIZE;
+
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.3) */
+ cqv2->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages || num_pages > SLI4_CREATE_CQV2_MAX_PAGES)
+ return -EIO;
+
+ switch (num_pages) {
+ case 1:
+ dw5_flags |= SLI4_CQ_CNT_VAL(256);
+ break;
+ case 2:
+ dw5_flags |= SLI4_CQ_CNT_VAL(512);
+ break;
+ case 4:
+ dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+ break;
+ case 8:
+ dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+ cqv2->cqe_count = cpu_to_le16(n_cqe);
+ break;
+ default:
+ efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_CQV2_AUTOVALID;
+
+ dw5_flags |= SLI4_CREATE_CQV2_EVT;
+ dw5_flags |= SLI4_CREATE_CQV2_VALID;
+
+ cqv2->dw5_flags = cpu_to_le32(dw5_flags);
+ cqv2->dw6w1_arm = cpu_to_le16(dw6w1_arm);
+ cqv2->eq_id = cpu_to_le16(eq_id);
+
+ for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+ cqv2->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ cqv2->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_common_create_eq(struct sli4 *sli4, void *buf, struct efc_dma *qmem)
+{
+ struct sli4_rqst_cmn_create_eq *eq;
+ u32 p;
+ uintptr_t addr;
+ u16 num_pages;
+ u32 dw5_flags = 0;
+ u32 dw6_flags = 0, ver;
+
+ eq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_create_eq),
+ NULL);
+ if (!eq)
+ return -EIO;
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ ver = CMD_V2;
+ else
+ ver = CMD_V0;
+
+ sli_cmd_fill_hdr(&eq->hdr, SLI4_CMN_CREATE_EQ, SLI4_SUBSYSTEM_COMMON,
+ ver, SLI4_RQST_PYLD_LEN(cmn_create_eq));
+
+ /* valid values for number of pages: 1, 2, 4 (sec 4.4.3) */
+ num_pages = qmem->size / SLI_PAGE_SIZE;
+ eq->num_pages = cpu_to_le16(num_pages);
+
+ switch (num_pages) {
+ case 1:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(1024);
+ break;
+ case 2:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(2048);
+ break;
+ case 4:
+ dw5_flags |= SLI4_EQE_SIZE_4;
+ dw6_flags |= SLI4_EQ_CNT_VAL(4096);
+ break;
+ default:
+ efc_log_err(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_EQ_AUTOVALID;
+
+ dw5_flags |= SLI4_CREATE_EQ_VALID;
+ dw6_flags &= (~SLI4_CREATE_EQ_ARM);
+ eq->dw5_flags = cpu_to_le32(dw5_flags);
+ eq->dw6_flags = cpu_to_le32(dw6_flags);
+ eq->dw7_delaymulti = cpu_to_le32(SLI4_CREATE_EQ_DELAYMULTI);
+
+ for (p = 0, addr = qmem->phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ eq->page_address[p].low = cpu_to_le32(lower_32_bits(addr));
+ eq->page_address[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_common_create_mq_ext(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 cq_id)
+{
+ struct sli4_rqst_cmn_create_mq_ext *mq;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages;
+ u16 dw6w1_flags = 0;
+
+ mq = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_create_mq_ext), NULL);
+ if (!mq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&mq->hdr, SLI4_CMN_CREATE_MQ_EXT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_create_mq_ext));
+
+ /* valid values for number of pages: 1, 2, 4, 8 (sec 4.4.12) */
+ num_pages = qmem->size / SLI_PAGE_SIZE;
+ mq->num_pages = cpu_to_le16(num_pages);
+ switch (num_pages) {
+ case 1:
+ dw6w1_flags |= SLI4_MQE_SIZE_16;
+ break;
+ case 2:
+ dw6w1_flags |= SLI4_MQE_SIZE_32;
+ break;
+ case 4:
+ dw6w1_flags |= SLI4_MQE_SIZE_64;
+ break;
+ case 8:
+ dw6w1_flags |= SLI4_MQE_SIZE_128;
+ break;
+ default:
+ efc_log_info(sli4, "num_pages %d not valid\n", num_pages);
+ return -EIO;
+ }
+
+ mq->async_event_bitmap = cpu_to_le32(SLI4_ASYNC_EVT_FC_ALL);
+
+ if (sli4->params.mq_create_version) {
+ mq->cq_id_v1 = cpu_to_le16(cq_id);
+ mq->hdr.dw3_version = cpu_to_le32(CMD_V1);
+ } else {
+ dw6w1_flags |= (cq_id << SLI4_CREATE_MQEXT_CQID_SHIFT);
+ }
+ mq->dw7_val = cpu_to_le32(SLI4_CREATE_MQEXT_VAL);
+
+ mq->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+ for (p = 0, addr = qmem->phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ mq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ mq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_wq_create(struct sli4 *sli4, void *buf, struct efc_dma *qmem, u16 cq_id)
+{
+ struct sli4_rqst_wq_create *wq;
+ u32 p;
+ uintptr_t addr;
+ u32 page_size = 0;
+ u32 n_wqe = 0;
+ u16 num_pages;
+
+ wq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(wq_create),
+ NULL);
+ if (!wq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&wq->hdr, SLI4_OPC_WQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V1, SLI4_RQST_PYLD_LEN(wq_create));
+ n_wqe = qmem->size / sli4->wqe_size;
+
+ switch (qmem->size) {
+ case 4096:
+ case 8192:
+ case 16384:
+ case 32768:
+ page_size = SZ_4K;
+ break;
+ case 65536:
+ page_size = SZ_8K;
+ break;
+ case 131072:
+ page_size = SZ_16K;
+ break;
+ case 262144:
+ page_size = SZ_32K;
+ break;
+ case 524288:
+ page_size = SZ_64K;
+ break;
+ default:
+ return -EIO;
+ }
+
+	/* valid values for number of pages (num_pages): 1-8 */
+ num_pages = sli_page_count(qmem->size, page_size);
+ wq->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages || num_pages > SLI4_WQ_CREATE_MAX_PAGES)
+ return -EIO;
+
+ wq->cq_id = cpu_to_le16(cq_id);
+
+ wq->page_size = page_size / SLI_PAGE_SIZE;
+
+ if (sli4->wqe_size == SLI4_WQE_EXT_BYTES)
+ wq->wqe_size_byte |= SLI4_WQE_EXT_SIZE;
+ else
+ wq->wqe_size_byte |= SLI4_WQE_SIZE;
+
+ wq->wqe_count = cpu_to_le16(n_wqe);
+
+ for (p = 0, addr = qmem->phys; p < num_pages; p++, addr += page_size) {
+ wq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ wq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_rq_create_v1(struct sli4 *sli4, void *buf, struct efc_dma *qmem,
+ u16 cq_id, u16 buffer_size)
+{
+ struct sli4_rqst_rq_create_v1 *rq;
+ u32 p;
+ uintptr_t addr;
+ u32 num_pages;
+
+ rq = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(rq_create_v1),
+ NULL);
+ if (!rq)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&rq->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V1, SLI4_RQST_PYLD_LEN(rq_create_v1));
+ /* Disable "no buffer warnings" to avoid Lancer bug */
+ rq->dim_dfd_dnb |= SLI4_RQ_CREATE_V1_DNB;
+
+ /* valid values for number of pages: 1-8 (sec 4.5.6) */
+ num_pages = sli_page_count(qmem->size, SLI_PAGE_SIZE);
+ rq->num_pages = cpu_to_le16(num_pages);
+ if (!num_pages ||
+ num_pages > SLI4_RQ_CREATE_V1_MAX_PAGES) {
+ efc_log_info(sli4, "num_pages %d not valid, max %d\n",
+ num_pages, SLI4_RQ_CREATE_V1_MAX_PAGES);
+ return -EIO;
+ }
+
+ /*
+ * RQE count is the total number of entries (note not lg2(# entries))
+ */
+ rq->rqe_count = cpu_to_le16(qmem->size / SLI4_RQE_SIZE);
+
+ rq->rqe_size_byte |= SLI4_RQE_SIZE_8;
+
+ rq->page_size = SLI4_RQ_PAGE_SIZE_4096;
+
+ if (buffer_size < sli4->rq_min_buf_size ||
+ buffer_size > sli4->rq_max_buf_size) {
+ efc_log_err(sli4, "buffer_size %d out of range (%d-%d)\n",
+ buffer_size, sli4->rq_min_buf_size,
+ sli4->rq_max_buf_size);
+ return -EIO;
+ }
+ rq->buffer_size = cpu_to_le32(buffer_size);
+
+ rq->cq_id = cpu_to_le16(cq_id);
+
+ for (p = 0, addr = qmem->phys;
+ p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ rq->page_phys_addr[p].low = cpu_to_le32(lower_32_bits(addr));
+ rq->page_phys_addr[p].high = cpu_to_le32(upper_32_bits(addr));
+ }
+
+ return 0;
+}
+
+static int
+sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
+ struct sli4_queue *qs[], u32 base_cq_id,
+ u32 header_buffer_size,
+ u32 payload_buffer_size, struct efc_dma *dma)
+{
+ struct sli4_rqst_rq_create_v2 *req = NULL;
+ u32 i, p, offset = 0;
+ u32 payload_size, page_count;
+ uintptr_t addr;
+ u32 num_pages;
+ __le32 len;
+
+ page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs;
+
+ /* Payload length must accommodate both request and response */
+ payload_size = max(SLI4_RQST_CMDSZ(rq_create_v2) +
+ SZ_DMAADDR * page_count,
+ sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+ dma->size = payload_size;
+ dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+ &dma->phys, GFP_DMA);
+ if (!dma->virt)
+ return -EIO;
+
+ memset(dma->virt, 0, payload_size);
+
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+ if (!req)
+ return -EIO;
+
+ len = SLI4_RQST_PYLD_LEN_VAR(rq_create_v2, SZ_DMAADDR * page_count);
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_RQ_CREATE, SLI4_SUBSYSTEM_FC,
+ CMD_V2, len);
+ /* Fill Payload fields */
+ req->dim_dfd_dnb |= SLI4_RQCREATEV2_DNB;
+ num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
+ req->num_pages = cpu_to_le16(num_pages);
+ req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE);
+ req->rqe_size_byte |= SLI4_RQE_SIZE_8;
+ req->page_size = SLI4_RQ_PAGE_SIZE_4096;
+ req->rq_count = num_rqs;
+ req->base_cq_id = cpu_to_le16(base_cq_id);
+ req->hdr_buffer_size = cpu_to_le16(header_buffer_size);
+ req->payload_buffer_size = cpu_to_le16(payload_buffer_size);
+
+ for (i = 0; i < num_rqs; i++) {
+ for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
+ p++, addr += SLI_PAGE_SIZE) {
+ req->page_phys_addr[offset].low =
+ cpu_to_le32(lower_32_bits(addr));
+ req->page_phys_addr[offset].high =
+ cpu_to_le32(upper_32_bits(addr));
+ offset++;
+ }
+ }
+
+ return 0;
+}
+
+static void
+__sli_queue_destroy(struct sli4 *sli4, struct sli4_queue *q)
+{
+ if (!q->dma.size)
+ return;
+
+ dma_free_coherent(&sli4->pci->dev, q->dma.size,
+ q->dma.virt, q->dma.phys);
+ memset(&q->dma, 0, sizeof(struct efc_dma));
+}
+
+int
+__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
+ size_t size, u32 n_entries, u32 align)
+{
+ if (q->dma.virt) {
+ efc_log_err(sli4, "%s failed\n", __func__);
+ return -EIO;
+ }
+
+ memset(q, 0, sizeof(struct sli4_queue));
+
+ q->dma.size = size * n_entries;
+ q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
+ &q->dma.phys, GFP_DMA);
+ if (!q->dma.virt) {
+ memset(&q->dma, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
+ return -EIO;
+ }
+
+ memset(q->dma.virt, 0, size * n_entries);
+
+ spin_lock_init(&q->lock);
+
+ q->type = qtype;
+ q->size = size;
+ q->length = n_entries;
+
+ if (q->type == SLI4_QTYPE_EQ || q->type == SLI4_QTYPE_CQ) {
+ /* For prism, phase will be flipped after
+ * a sweep through eq and cq
+ */
+ q->phase = 1;
+ }
+
+	/* Limit processing to half the queue size per interrupt */
+ q->proc_limit = n_entries / 2;
+
+ if (q->type == SLI4_QTYPE_EQ)
+ q->posted_limit = q->length / 2;
+ else
+ q->posted_limit = 64;
+
+ return 0;
+}
+
+int
+sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q,
+ u32 n_entries, u32 buffer_size,
+ struct sli4_queue *cq, bool is_hdr)
+{
+ if (__sli_queue_init(sli4, q, SLI4_QTYPE_RQ, SLI4_RQE_SIZE,
+ n_entries, SLI_PAGE_SIZE))
+ return -EIO;
+
+ if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
+ buffer_size))
+ goto error;
+
+ if (__sli_create_queue(sli4, q))
+ goto error;
+
+ if (is_hdr && q->id & 1) {
+ efc_log_info(sli4, "bad header RQ_ID %d\n", q->id);
+ goto error;
+ } else if (!is_hdr && (q->id & 1) == 0) {
+ efc_log_info(sli4, "bad data RQ_ID %d\n", q->id);
+ goto error;
+ }
+
+ if (is_hdr)
+ q->u.flag |= SLI4_QUEUE_FLAG_HDR;
+ else
+ q->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+ return 0;
+
+error:
+ __sli_queue_destroy(sli4, q);
+ return -EIO;
+}
+
+int
+sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs,
+ struct sli4_queue *qs[], u32 base_cq_id,
+ u32 n_entries, u32 header_buffer_size,
+ u32 payload_buffer_size)
+{
+ u32 i;
+ struct efc_dma dma = {0};
+ struct sli4_rsp_cmn_create_queue_set *rsp = NULL;
+ void __iomem *db_regaddr = NULL;
+ u32 num_rqs = num_rq_pairs * 2;
+
+ for (i = 0; i < num_rqs; i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_RQ,
+ SLI4_RQE_SIZE, n_entries,
+ SLI_PAGE_SIZE)) {
+ goto error;
+ }
+ }
+
+ if (sli_cmd_rq_create_v2(sli4, num_rqs, qs, base_cq_id,
+ header_buffer_size, payload_buffer_size,
+ &dma)) {
+ goto error;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_err(sli4, "bootstrap mailbox write failed RQSet\n");
+ goto error;
+ }
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+ else
+ db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+
+ rsp = dma.virt;
+ if (rsp->hdr.status) {
+ efc_log_err(sli4, "bad create RQSet status=%#x addl=%#x\n",
+ rsp->hdr.status, rsp->hdr.additional_status);
+ goto error;
+ }
+
+ for (i = 0; i < num_rqs; i++) {
+ qs[i]->id = i + le16_to_cpu(rsp->q_id);
+ if ((qs[i]->id & 1) == 0)
+ qs[i]->u.flag |= SLI4_QUEUE_FLAG_HDR;
+ else
+ qs[i]->u.flag &= ~SLI4_QUEUE_FLAG_HDR;
+
+ qs[i]->db_regaddr = db_regaddr;
+ }
+
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+ return 0;
+
+error:
+ for (i = 0; i < num_rqs; i++)
+ __sli_queue_destroy(sli4, qs[i]);
+
+ if (dma.virt)
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+ dma.phys);
+
+ return -EIO;
+}
+
+static int
+sli_res_sli_config(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_sli_config *sli_config = buf;
+
+ /* sanity check */
+ if (!buf || sli_config->hdr.command !=
+ SLI4_MBX_CMD_SLI_CONFIG) {
+ efc_log_err(sli4, "bad parameter buf=%p cmd=%#x\n", buf,
+ buf ? sli_config->hdr.command : -1);
+ return -EIO;
+ }
+
+ if (le16_to_cpu(sli_config->hdr.status))
+ return le16_to_cpu(sli_config->hdr.status);
+
+ if (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB)
+ return sli_config->payload.embed[4];
+
+ efc_log_info(sli4, "external buffers not supported\n");
+ return -EIO;
+}
+
+int
+__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q)
+{
+ struct sli4_rsp_cmn_create_queue *res_q = NULL;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail %s\n",
+ SLI4_QNAME[q->type]);
+ return -EIO;
+ }
+ if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad status create %s\n",
+ SLI4_QNAME[q->type]);
+ return -EIO;
+ }
+ res_q = (void *)((u8 *)sli4->bmbx.virt +
+ offsetof(struct sli4_cmd_sli_config, payload));
+
+ if (res_q->hdr.status) {
+ efc_log_err(sli4, "bad create %s status=%#x addl=%#x\n",
+ SLI4_QNAME[q->type], res_q->hdr.status,
+ res_q->hdr.additional_status);
+ return -EIO;
+ }
+ q->id = le16_to_cpu(res_q->q_id);
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_EQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+ break;
+ case SLI4_QTYPE_CQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+ break;
+ case SLI4_QTYPE_MQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_MQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_MQ_DB_REG;
+ break;
+ case SLI4_QTYPE_RQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_RQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_RQ_DB_REG;
+ break;
+ case SLI4_QTYPE_WQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ q->db_regaddr = sli4->reg[1] + SLI4_IF6_WQ_DB_REG;
+ else
+ q->db_regaddr = sli4->reg[0] + SLI4_IO_WQ_DB_REG;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int
+sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype)
+{
+ u32 size = 0;
+
+ switch (qtype) {
+ case SLI4_QTYPE_EQ:
+ size = sizeof(u32);
+ break;
+ case SLI4_QTYPE_CQ:
+ size = 16;
+ break;
+ case SLI4_QTYPE_MQ:
+ size = 256;
+ break;
+ case SLI4_QTYPE_WQ:
+ size = sli4->wqe_size;
+ break;
+ case SLI4_QTYPE_RQ:
+ size = SLI4_RQE_SIZE;
+ break;
+ default:
+ efc_log_info(sli4, "unknown queue type %d\n", qtype);
+ return -1;
+ }
+ return size;
+}
+
+int
+sli_queue_alloc(struct sli4 *sli4, u32 qtype,
+ struct sli4_queue *q, u32 n_entries,
+ struct sli4_queue *assoc)
+{
+ int size;
+ u32 align = 0;
+
+ /* get queue size */
+ size = sli_get_queue_entry_size(sli4, qtype);
+ if (size < 0)
+ return -EIO;
+ align = SLI_PAGE_SIZE;
+
+ if (__sli_queue_init(sli4, q, qtype, size, n_entries, align))
+ return -EIO;
+
+ switch (qtype) {
+ case SLI4_QTYPE_EQ:
+ if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_CQ:
+ if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
+ assoc ? assoc->id : 0) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_MQ:
+ assoc->u.flag |= SLI4_QUEUE_FLAG_MQ;
+ if (!sli_cmd_common_create_mq_ext(sli4, sli4->bmbx.virt,
+ &q->dma, assoc->id) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ case SLI4_QTYPE_WQ:
+ if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
+ assoc ? assoc->id : 0) &&
+ !__sli_create_queue(sli4, q))
+ return 0;
+
+ break;
+ default:
+ efc_log_info(sli4, "unknown queue type %d\n", qtype);
+ }
+
+ __sli_queue_destroy(sli4, q);
+ return -EIO;
+}
+
+static int sli_cmd_cq_set_create(struct sli4 *sli4,
+ struct sli4_queue *qs[], u32 num_cqs,
+ struct sli4_queue *eqs[],
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_create_cq_set_v0 *req = NULL;
+ uintptr_t addr;
+ u32 i, offset = 0, page_bytes = 0, payload_size;
+ u32 p = 0, page_size = 0, n_cqe = 0, num_pages_cq;
+ u32 dw5_flags = 0;
+ u16 dw6w1_flags = 0;
+ __le32 req_len;
+
+ n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
+ switch (n_cqe) {
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ page_size = 1;
+ break;
+ case 4096:
+ page_size = 2;
+ break;
+ default:
+ return -EIO;
+ }
+
+ page_bytes = page_size * SLI_PAGE_SIZE;
+ num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
+ payload_size = max(SLI4_RQST_CMDSZ(cmn_create_cq_set_v0) +
+ (SZ_DMAADDR * num_pages_cq * num_cqs),
+ sizeof(struct sli4_rsp_cmn_create_queue_set));
+
+ dma->size = payload_size;
+ dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
+ &dma->phys, GFP_DMA);
+ if (!dma->virt)
+ return -EIO;
+
+ memset(dma->virt, 0, payload_size);
+
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
+ if (!req)
+ return -EIO;
+
+ req_len = SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_set_v0,
+ SZ_DMAADDR * num_pages_cq * num_cqs);
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_CREATE_CQ_SET, SLI4_SUBSYSTEM_FC,
+ CMD_V0, req_len);
+ req->page_size = page_size;
+
+ req->num_pages = cpu_to_le16(num_pages_cq);
+ switch (num_pages_cq) {
+ case 1:
+ dw5_flags |= SLI4_CQ_CNT_VAL(256);
+ break;
+ case 2:
+ dw5_flags |= SLI4_CQ_CNT_VAL(512);
+ break;
+ case 4:
+ dw5_flags |= SLI4_CQ_CNT_VAL(1024);
+ break;
+ case 8:
+ dw5_flags |= SLI4_CQ_CNT_VAL(LARGE);
+ dw6w1_flags |= (n_cqe & SLI4_CREATE_CQSETV0_CQE_COUNT);
+ break;
+ default:
+ efc_log_info(sli4, "num_pages %d not valid\n", num_pages_cq);
+ return -EIO;
+ }
+
+ dw5_flags |= SLI4_CREATE_CQSETV0_EVT;
+ dw5_flags |= SLI4_CREATE_CQSETV0_VALID;
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ dw5_flags |= SLI4_CREATE_CQSETV0_AUTOVALID;
+
+ dw6w1_flags &= ~SLI4_CREATE_CQSETV0_ARM;
+
+ req->dw5_flags = cpu_to_le32(dw5_flags);
+ req->dw6w1_flags = cpu_to_le16(dw6w1_flags);
+
+ req->num_cq_req = cpu_to_le16(num_cqs);
+
+ /* Fill page addresses of all the CQs. */
+ for (i = 0; i < num_cqs; i++) {
+ req->eq_id[i] = cpu_to_le16(eqs[i]->id);
+ for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq;
+ p++, addr += page_bytes) {
+ req->page_phys_addr[offset].low =
+ cpu_to_le32(lower_32_bits(addr));
+ req->page_phys_addr[offset].high =
+ cpu_to_le32(upper_32_bits(addr));
+ offset++;
+ }
+ }
+
+ return 0;
+}
+
+int
+sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[],
+ u32 num_cqs, u32 n_entries, struct sli4_queue *eqs[])
+{
+ u32 i;
+ struct efc_dma dma = {0};
+ struct sli4_rsp_cmn_create_queue_set *res;
+ void __iomem *db_regaddr;
+
+ /* Align the queue DMA memory */
+ for (i = 0; i < num_cqs; i++) {
+ if (__sli_queue_init(sli4, qs[i], SLI4_QTYPE_CQ, SLI4_CQE_BYTES,
+ n_entries, SLI_PAGE_SIZE))
+ goto error;
+ }
+
+ if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))
+ goto error;
+
+ if (sli_bmbx_command(sli4))
+ goto error;
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ db_regaddr = sli4->reg[1] + SLI4_IF6_CQ_DB_REG;
+ else
+ db_regaddr = sli4->reg[0] + SLI4_EQCQ_DB_REG;
+
+ res = dma.virt;
+ if (res->hdr.status) {
+ efc_log_err(sli4, "bad create CQSet status=%#x addl=%#x\n",
+ res->hdr.status, res->hdr.additional_status);
+ goto error;
+ }
+
+ /* Check if we got all requested CQs. */
+ if (le16_to_cpu(res->num_q_allocated) != num_cqs) {
+ efc_log_crit(sli4, "Requested count CQs doesn't match.\n");
+ goto error;
+ }
+ /* Fill the resp cq ids. */
+ for (i = 0; i < num_cqs; i++) {
+ qs[i]->id = le16_to_cpu(res->q_id) + i;
+ qs[i]->db_regaddr = db_regaddr;
+ }
+
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
+
+ return 0;
+
+error:
+ for (i = 0; i < num_cqs; i++)
+ __sli_queue_destroy(sli4, qs[i]);
+
+ if (dma.virt)
+ dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
+ dma.phys);
+
+ return -EIO;
+}
+
+static int
+sli_cmd_common_destroy_q(struct sli4 *sli4, u8 opc, u8 subsystem, u16 q_id)
+{
+ struct sli4_rqst_cmn_destroy_q *req;
+
+ /* Payload length must accommodate both request and response */
+ req = sli_config_cmd_init(sli4, sli4->bmbx.virt,
+ SLI4_CFG_PYLD_LENGTH(cmn_destroy_q), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, opc, subsystem,
+ CMD_V0, SLI4_RQST_PYLD_LEN(cmn_destroy_q));
+ req->q_id = cpu_to_le16(q_id);
+
+ return 0;
+}
+
+int
+sli_queue_free(struct sli4 *sli4, struct sli4_queue *q,
+ u32 destroy_queues, u32 free_memory)
+{
+ int rc = 0;
+ u8 opcode, subsystem;
+ struct sli4_rsp_hdr *res;
+
+ if (!q) {
+ efc_log_err(sli4, "bad parameter sli4=%p q=%p\n", sli4, q);
+ return -EIO;
+ }
+
+ if (!destroy_queues)
+ goto free_mem;
+
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ opcode = SLI4_CMN_DESTROY_EQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_CQ:
+ opcode = SLI4_CMN_DESTROY_CQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_MQ:
+ opcode = SLI4_CMN_DESTROY_MQ;
+ subsystem = SLI4_SUBSYSTEM_COMMON;
+ break;
+ case SLI4_QTYPE_WQ:
+ opcode = SLI4_OPC_WQ_DESTROY;
+ subsystem = SLI4_SUBSYSTEM_FC;
+ break;
+ case SLI4_QTYPE_RQ:
+ opcode = SLI4_OPC_RQ_DESTROY;
+ subsystem = SLI4_SUBSYSTEM_FC;
+ break;
+ default:
+ efc_log_info(sli4, "bad queue type %d\n", q->type);
+ rc = -EIO;
+ goto free_mem;
+ }
+
+ rc = sli_cmd_common_destroy_q(sli4, opcode, subsystem, q->id);
+ if (rc)
+ goto free_mem;
+
+ rc = sli_bmbx_command(sli4);
+ if (rc)
+ goto free_mem;
+
+ rc = sli_res_sli_config(sli4, sli4->bmbx.virt);
+ if (rc)
+ goto free_mem;
+
+ res = (void *)((u8 *)sli4->bmbx.virt +
+ offsetof(struct sli4_cmd_sli_config, payload));
+ if (res->status) {
+ efc_log_err(sli4, "destroy %s st=%#x addl=%#x\n",
+ SLI4_QNAME[q->type], res->status,
+ res->additional_status);
+ rc = -EIO;
+ goto free_mem;
+ }
+
+free_mem:
+ if (free_memory)
+ __sli_queue_destroy(sli4, q);
+
+ return rc;
+}
+
+int
+sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+ u32 val;
+ unsigned long flags = 0;
+ u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm)
+{
+ u32 val = 0;
+ unsigned long flags = 0;
+ u32 a = arm ? SLI4_EQCQ_ARM : SLI4_EQCQ_UNARM;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ switch (q->type) {
+ case SLI4_QTYPE_EQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_eq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_eq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ break;
+ case SLI4_QTYPE_CQ:
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6)
+ val = sli_format_if6_cq_db_data(q->n_posted, q->id, a);
+ else
+ val = sli_format_cq_db_data(q->n_posted, q->id, a);
+
+ writel(val, q->db_regaddr);
+ q->n_posted = 0;
+ break;
+ default:
+ efc_log_info(sli4, "should only be used for EQ/CQ, not %s\n",
+ SLI4_QNAME[q->type]);
+ }
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ if (sli4->params.perf_wq_id_association)
+ sli_set_wq_id_association(entry, q->id);
+
+ memcpy(qe, entry, q->size);
+ val = sli_format_wq_db_data(q->id);
+
+ writel(val, q->db_regaddr);
+ q->index = (q->index + 1) & (q->length - 1);
+
+ return qindex;
+}
+
+int
+sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->lock, flags);
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ memcpy(qe, entry, q->size);
+ val = sli_format_mq_db_data(q->id);
+ writel(val, q->db_regaddr);
+ q->index = (q->index + 1) & (q->length - 1);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return qindex;
+}
+
+int
+sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ u32 qindex;
+ u32 val = 0;
+
+ qindex = q->index;
+ qe += q->index * q->size;
+
+ memcpy(qe, entry, q->size);
+
+ /*
+ * In RQ-pair, an RQ either contains the FC header
+ * (i.e. is_hdr == TRUE) or the payload.
+ *
+ * Don't ring doorbell for payload RQ
+ */
+ if (!(q->u.flag & SLI4_QUEUE_FLAG_HDR))
+ goto skip;
+
+ val = sli_format_rq_db_data(q->id);
+ writel(val, q->db_regaddr);
+skip:
+ q->index = (q->index + 1) & (q->length - 1);
+
+ return qindex;
+}
+
+int
+sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+ u16 wflags = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->index * q->size;
+
+ /* Check if eqe is valid */
+ wflags = le16_to_cpu(((struct sli4_eqe *)qe)->dw0w0_flags);
+
+ if ((wflags & SLI4_EQE_VALID) != q->phase) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+ wflags &= ~SLI4_EQE_VALID;
+ ((struct sli4_eqe *)qe)->dw0w0_flags = cpu_to_le16(wflags);
+ }
+
+ memcpy(entry, qe, q->size);
+ q->index = (q->index + 1) & (q->length - 1);
+ q->n_posted++;
+ /*
+ * For prism, the phase value will be used
+ * to check the validity of eq/cq entries.
+ * The value toggles after a complete sweep
+ * through the queue.
+ */
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+ q->phase ^= (u16)0x1;
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+ u32 dwflags = 0;
+ bool valid_bit_set;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->index * q->size;
+
+ /* Check if cqe is valid */
+ dwflags = le32_to_cpu(((struct sli4_mcqe *)qe)->dw3_flags);
+ valid_bit_set = (dwflags & SLI4_MCQE_VALID) != 0;
+
+ if (valid_bit_set != q->phase) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ if (sli4->if_type != SLI4_INTF_IF_TYPE_6) {
+ dwflags &= ~SLI4_MCQE_VALID;
+ ((struct sli4_mcqe *)qe)->dw3_flags = cpu_to_le32(dwflags);
+ }
+
+ memcpy(entry, qe, q->size);
+ q->index = (q->index + 1) & (q->length - 1);
+ q->n_posted++;
+ /*
+ * For prism, the phase value will be used
+ * to check the validity of eq/cq entries.
+ * The value toggles after a complete sweep
+ * through the queue.
+ */
+
+ if (sli4->if_type == SLI4_INTF_IF_TYPE_6 && q->index == 0)
+ q->phase ^= (u16)0x1;
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry)
+{
+ u8 *qe = q->dma.virt;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ qe += q->u.r_idx * q->size;
+
+ /* Check if mqe is valid */
+ if (q->index == q->u.r_idx) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ return -EIO;
+ }
+
+ memcpy(entry, qe, q->size);
+ q->u.r_idx = (q->u.r_idx + 1) & (q->length - 1);
+
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return 0;
+}
+
+int
+sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id)
+{
+ struct sli4_eqe *eqe = (void *)buf;
+ int rc = 0;
+ u16 flags = 0;
+ u16 majorcode;
+ u16 minorcode;
+
+ if (!buf || !cq_id) {
+ efc_log_err(sli4, "bad parameters sli4=%p buf=%p cq_id=%p\n",
+ sli4, buf, cq_id);
+ return -EIO;
+ }
+
+ flags = le16_to_cpu(eqe->dw0w0_flags);
+ majorcode = (flags & SLI4_EQE_MJCODE) >> 1;
+ minorcode = (flags & SLI4_EQE_MNCODE) >> 4;
+ switch (majorcode) {
+ case SLI4_MAJOR_CODE_STANDARD:
+ *cq_id = le16_to_cpu(eqe->resource_id);
+ break;
+ case SLI4_MAJOR_CODE_SENTINEL:
+ efc_log_info(sli4, "sentinel EQE\n");
+ rc = SLI4_EQE_STATUS_EQ_FULL;
+ break;
+ default:
+ efc_log_info(sli4, "Unsupported EQE: major %x minor %x\n",
+ majorcode, minorcode);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+int
+sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
+ enum sli4_qentry *etype, u16 *q_id)
+{
+ int rc = 0;
+
+ if (!cq || !cqe || !etype) {
+ efc_log_err(sli4, "bad params sli4=%p cq=%p cqe=%p etype=%p q_id=%p\n",
+ sli4, cq, cqe, etype, q_id);
+ return -EINVAL;
+ }
+
+ /* Parse a CQ entry to retrieve the event type and the queue id */
+ if (cq->u.flag & SLI4_QUEUE_FLAG_MQ) {
+ struct sli4_mcqe *mcqe = (void *)cqe;
+
+ if (le32_to_cpu(mcqe->dw3_flags) & SLI4_MCQE_AE) {
+ *etype = SLI4_QENTRY_ASYNC;
+ } else {
+ *etype = SLI4_QENTRY_MQ;
+ rc = sli_cqe_mq(sli4, mcqe);
+ }
+ *q_id = -1;
+ } else {
+ rc = sli_fc_cqe_parse(sli4, cq, cqe, etype, q_id);
+ }
+
+ return rc;
+}
+
+int
+sli_abort_wqe(struct sli4 *sli, void *buf, enum sli4_abort_type type,
+ bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id)
+{
+ struct sli4_abort_wqe *abort = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ switch (type) {
+ case SLI4_ABORT_XRI:
+ abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
+ if (mask) {
+ efc_log_warn(sli, "%#x aborting XRI %#x warning non-zero mask",
+ mask, ids);
+ mask = 0;
+ }
+ break;
+ case SLI4_ABORT_ABORT_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_ABORT_TAG;
+ break;
+ case SLI4_ABORT_REQUEST_ID:
+ abort->criteria = SLI4_ABORT_CRITERIA_REQUEST_TAG;
+ break;
+ default:
+ efc_log_info(sli, "unsupported type %#x\n", type);
+ return -EIO;
+ }
+
+ abort->ia_ir_byte |= send_abts ? 0 : 1;
+
+ /* Suppress ABTS retries */
+ abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
+
+ abort->t_mask = cpu_to_le32(mask);
+ abort->t_tag = cpu_to_le32(ids);
+ abort->command = SLI4_WQE_ABORT;
+ abort->request_tag = cpu_to_le16(tag);
+
+ abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
+
+ abort->cq_id = cpu_to_le16(cq_id);
+ abort->cmdtype_wqec_byte |= SLI4_CMD_ABORT_WQE;
+
+ return 0;
+}
+
+int
+sli_els_request64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ struct sli_els_params *params)
+{
+ struct sli4_els_request64_wqe *els = buf;
+ struct sli4_sge *sge = sgl->virt;
+ bool is_fabric = false;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli->wqe_size);
+
+ bptr = &els->els_request_payload;
+ if (sli->params.sgl_pre_registered) {
+ els->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_REQ_WQE_XBL;
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->xmit_len & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ ((2 * sizeof(struct sli4_sge)) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ els->els_request_payload_length = cpu_to_le32(params->xmit_len);
+ els->max_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ els->xri_tag = cpu_to_le16(params->xri);
+ els->timer = params->timeout;
+ els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_REQUEST64;
+
+ els->request_tag = cpu_to_le16(params->tag);
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_IOD;
+
+ els->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_REQ_WQE_QOSD;
+
+ /* figure out the ELS_ID value from the request buffer */
+
+ switch (params->cmd) {
+ case ELS_LOGO:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_LOGO << SLI4_REQ_WQE_ELSID_SHFT;
+ if (params->rpi_registered) {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ }
+ if (params->d_id == FC_FID_FLOGI)
+ is_fabric = true;
+ break;
+ case ELS_FDISC:
+ if (params->d_id == FC_FID_FLOGI)
+ is_fabric = true;
+ if (params->s_id == 0) {
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_FDISC << SLI4_REQ_WQE_ELSID_SHFT;
+ is_fabric = true;
+ } else {
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ }
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
+ break;
+ case ELS_FLOGI:
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ /*
+ * Set SP here ... we haven't done a REG_VPI yet.
+ * This may no longer be needed once the VFI/VPI
+ * registrations have completed.
+ *
+ * Use the FC_ID of the SPORT if it has been allocated;
+ * otherwise use an S_ID of zero.
+ */
+ els->sid_sp_dword |= cpu_to_le32(1 << SLI4_REQ_WQE_SP_SHFT);
+ if (params->s_id != U32_MAX)
+ els->sid_sp_dword |= cpu_to_le32(params->s_id);
+ break;
+ case ELS_PLOGI:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_PLOGI << SLI4_REQ_WQE_ELSID_SHFT;
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ break;
+ case ELS_SCR:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ break;
+ default:
+ els->cmdtype_elsid_byte |=
+ SLI4_ELS_REQUEST64_OTHER << SLI4_REQ_WQE_ELSID_SHFT;
+ if (params->rpi_registered) {
+ els->ct_byte |= (SLI4_GENERIC_CONTEXT_RPI <<
+ SLI4_REQ_WQE_CT_SHFT);
+ els->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_REQ_WQE_CT_SHFT;
+ els->context_tag = cpu_to_le16(params->vpi);
+ }
+ break;
+ }
+
+ if (is_fabric)
+ els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_FABRIC;
+ else
+ els->cmdtype_elsid_byte |= SLI4_ELS_REQUEST64_CMD_NON_FABRIC;
+
+ els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) !=
+ SLI4_GENERIC_CONTEXT_RPI)
+ els->remote_id_dword = cpu_to_le32(params->d_id);
+
+ if (((els->ct_byte & SLI4_REQ_WQE_CT) >> SLI4_REQ_WQE_CT_SHFT) ==
+ SLI4_GENERIC_CONTEXT_VPI)
+ els->temporary_rpi = cpu_to_le16(params->rpi);
+
+ return 0;
+}
+
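+/*
+ * Build an FCP_ICMND64 work queue entry; the payload length is taken from
+ * the FCP_CMND and FCP_RSP SGEs (sge[0] and sge[1]), and the data is
+ * described by a single BDE or a BLP depending on SGL pre-registration.
+ */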
+int
+sli_fcp_icmnd64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout)
+{
+ struct sli4_fcp_icmnd64_wqe *icmnd = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &icmnd->bde;
+ if (sli->params.sgl_pre_registered) {
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_ICMD_WQE_XBL;
+
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ icmnd->payload_offset_length = cpu_to_le16(len);
+ icmnd->xri_tag = cpu_to_le16(xri);
+ icmnd->context_tag = cpu_to_le16(rpi);
+ icmnd->timer = timeout;
+
+ /* WQE word 4 contains read transfer length */
+ icmnd->class_pu_byte |= 2 << SLI4_ICMD_WQE_PU_SHFT;
+ icmnd->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ icmnd->command = SLI4_WQE_FCP_ICMND64;
+ icmnd->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_ICMD_WQE_CT_SHFT;
+
+ icmnd->abort_tag = cpu_to_le32(xri);
+
+ icmnd->request_tag = cpu_to_le16(tag);
+ icmnd->len_loc1_byte |= SLI4_ICMD_WQE_LEN_LOC_BIT1;
+ icmnd->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_ICMD_WQE_LEN_LOC_BIT2;
+ icmnd->cmd_type_byte |= SLI4_CMD_FCP_ICMND64_WQE;
+ icmnd->cq_id = cpu_to_le16(cq_id);
+
+ return 0;
+}
+
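+/*
+ * Build an FCP_IREAD64 work queue entry for an initiator read. With
+ * pre-registered SGLs the first SGE is inlined as a data BDE; otherwise a
+ * BLP references the SGL, the FCP_CMND length is recorded and the response
+ * SGE is marked as "skip".
+ */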
+int
+sli_fcp_iread64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len, u16 xri, u16 tag,
+ u16 cq_id, u32 rpi, u32 rnode_fcid,
+ u8 dif, u8 bs, u8 timeout)
+{
+ struct sli4_fcp_iread64_wqe *iread = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 sge_flags, len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+
+ sge = sgl->virt;
+ bptr = &iread->bde;
+ if (sli->params.sgl_pre_registered) {
+ iread->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IR_WQE_XBL;
+
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+
+ /*
+ * fill out fcp_cmnd buffer len and change resp buffer to be of
+ * type "skip" (note: response will still be written to sge[1]
+ * if necessary)
+ */
+ len = le32_to_cpu(sge[0].buffer_length);
+ iread->fcp_cmd_buffer_length = cpu_to_le16(len);
+
+ sge_flags = le32_to_cpu(sge[1].dw2_flags);
+ sge_flags &= (~SLI4_SGE_TYPE_MASK);
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ sge[1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ iread->payload_offset_length = cpu_to_le16(len);
+ iread->total_transfer_length = cpu_to_le32(xfer_len);
+
+ iread->xri_tag = cpu_to_le16(xri);
+ iread->context_tag = cpu_to_le16(rpi);
+
+ iread->timer = timeout;
+
+ /* WQE word 4 contains read transfer length */
+ iread->class_pu_byte |= 2 << SLI4_IR_WQE_PU_SHFT;
+ iread->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ iread->command = SLI4_WQE_FCP_IREAD64;
+ iread->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_IR_WQE_CT_SHFT;
+ iread->dif_ct_bs_byte |= dif;
+ iread->dif_ct_bs_byte |= bs << SLI4_IR_WQE_BS_SHFT;
+
+ iread->abort_tag = cpu_to_le32(xri);
+
+ iread->request_tag = cpu_to_le16(tag);
+ iread->len_loc1_byte |= SLI4_IR_WQE_LEN_LOC_BIT1;
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_LEN_LOC_BIT2;
+ iread->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IR_WQE_IOD;
+ iread->cmd_type_byte |= SLI4_CMD_FCP_IREAD64_WQE;
+ iread->cq_id = cpu_to_le16(cq_id);
+
+ if (sli->params.perf_hint) {
+ bptr = &iread->first_data_bde;
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low =
+ sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high =
+ sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
+
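+/*
+ * Build an FCP_IWRITE64 work queue entry for an initiator write. The initial
+ * transfer length is capped at the first-burst size, and the SGL handling
+ * mirrors the read path above.
+ */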
+int
+sli_fcp_iwrite64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len,
+ u32 first_burst, u16 xri, u16 tag,
+ u16 cq_id, u32 rpi,
+ u32 rnode_fcid,
+ u8 dif, u8 bs, u8 timeout)
+{
+ struct sli4_fcp_iwrite64_wqe *iwrite = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+ u32 sge_flags, min, len;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &iwrite->bde;
+ if (sli->params.sgl_pre_registered) {
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_IWR_WQE_XBL;
+
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_DBDE;
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) & SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_XBL;
+
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high = cpu_to_le32(upper_32_bits(sgl->phys));
+
+ /*
+ * fill out fcp_cmnd buffer len and change resp buffer to be of
+ * type "skip" (note: response will still be written to sge[1]
+ * if necessary)
+ */
+ len = le32_to_cpu(sge[0].buffer_length);
+ iwrite->fcp_cmd_buffer_length = cpu_to_le16(len);
+ sge_flags = le32_to_cpu(sge[1].dw2_flags);
+ sge_flags &= ~SLI4_SGE_TYPE_MASK;
+ sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
+ sge[1].dw2_flags = cpu_to_le32(sge_flags);
+ }
+
+ len = le32_to_cpu(sge[0].buffer_length) +
+ le32_to_cpu(sge[1].buffer_length);
+ iwrite->payload_offset_length = cpu_to_le16(len);
+ iwrite->total_transfer_length = cpu_to_le16(xfer_len);
+ min = (xfer_len < first_burst) ? xfer_len : first_burst;
+ iwrite->initial_transfer_length = cpu_to_le16(min);
+
+ iwrite->xri_tag = cpu_to_le16(xri);
+ iwrite->context_tag = cpu_to_le16(rpi);
+
+ iwrite->timer = timeout;
+ /* WQE word 4 contains read transfer length */
+ iwrite->class_pu_byte |= 2 << SLI4_IWR_WQE_PU_SHFT;
+ iwrite->class_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ iwrite->command = SLI4_WQE_FCP_IWRITE64;
+ iwrite->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_IWR_WQE_CT_SHFT;
+ iwrite->dif_ct_bs_byte |= dif;
+ iwrite->dif_ct_bs_byte |= bs << SLI4_IWR_WQE_BS_SHFT;
+
+ iwrite->abort_tag = cpu_to_le32(xri);
+
+ iwrite->request_tag = cpu_to_le16(tag);
+ iwrite->len_loc1_byte |= SLI4_IWR_WQE_LEN_LOC_BIT1;
+ iwrite->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_IWR_WQE_LEN_LOC_BIT2;
+ iwrite->cmd_type_byte |= SLI4_CMD_FCP_IWRITE64_WQE;
+ iwrite->cq_id = cpu_to_le16(cq_id);
+
+ if (sli->params.perf_hint) {
+ bptr = &iwrite->first_data_bde;
+
+ bptr->bde_type_buflen = cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high = sge[first_data_sge].buffer_address_high;
+ }
+
+ return 0;
+}
+
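+/*
+ * Build an FCP_TRECEIVE64 work queue entry for receiving data in target
+ * mode. When the data fits in a single SGE and no DIF is in use a plain
+ * data BDE is used; otherwise the SGL is referenced through a BLP.
+ */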
+int
+sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_treceive64_wqe *trecv = buf;
+ struct sli4_fcp_128byte_wqe *trecv_128 = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &trecv->bde;
+ if (sli->params.sgl_pre_registered) {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes &= ~SLI4_TRCV_WQE_XBL;
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length)
+ & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+
+ trecv->payload_offset_length = sge[0].buffer_length;
+ } else {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_XBL;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif &&
+ params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length)
+ & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[2].buffer_address_low;
+ bptr->u.data.high = sge[2].buffer_address_high;
+ } else {
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size & SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low = cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+ }
+
+ trecv->relative_offset = cpu_to_le32(params->offset);
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_XC;
+
+ trecv->xri_tag = cpu_to_le16(params->xri);
+
+ trecv->context_tag = cpu_to_le16(params->rpi);
+
+ /* WQE uses relative offset */
+ trecv->class_ar_pu_byte |= 1 << SLI4_TRCV_WQE_PU_SHFT;
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
+ trecv->class_ar_pu_byte |= SLI4_TRCV_WQE_AR;
+
+ trecv->command = SLI4_WQE_FCP_TRECEIVE64;
+ trecv->class_ar_pu_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ trecv->dif_ct_bs_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_TRCV_WQE_CT_SHFT;
+ trecv->dif_ct_bs_byte |= bs << SLI4_TRCV_WQE_BS_SHFT;
+
+ trecv->remote_xid = cpu_to_le16(params->ox_id);
+
+ trecv->request_tag = cpu_to_le16(params->tag);
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_IOD;
+
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_LEN_LOC_BIT2;
+
+ trecv->cmd_type_byte |= SLI4_CMD_FCP_TRECEIVE64_WQE;
+
+ trecv->cq_id = cpu_to_le16(cq_id);
+
+ trecv->fcp_data_receive_length = cpu_to_le32(params->xmit_len);
+
+ if (sli->params.perf_hint) {
+ bptr = &trecv->first_data_bde;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high = sge[first_data_sge].buffer_address_high;
+ }
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ trecv->eat_xc_ccpe |= SLI4_TRCV_WQE_CCPE;
+ trecv->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(trecv->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
+ trecv->lloc1_appid |= SLI4_TRCV_WQE_APPID;
+ trecv->qosd_xbl_hlm_iod_dbde_wqes |= SLI4_TRCV_WQE_WQES;
+ trecv_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
+int
+sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf,
+ struct efc_dma *sgl, u32 first_data_sge,
+ u16 sec_xri, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ int rc;
+
+ rc = sli_fcp_treceive64_wqe(sli, buf, sgl, first_data_sge,
+ cq_id, dif, bs, params);
+ if (!rc) {
+ struct sli4_fcp_treceive64_wqe *trecv = buf;
+
+ trecv->command = SLI4_WQE_FCP_CONT_TRECEIVE64;
+ trecv->dword5.sec_xri_tag = cpu_to_le16(sec_xri);
+ }
+ return rc;
+}
+
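+/*
+ * Build an FCP_TRSP64 work queue entry carrying the FCP response. With
+ * auto-good-response enabled no response buffer is needed; otherwise sge[0]
+ * describes the FCP_RSP payload.
+ */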
+int
+sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_trsp64_wqe *trsp = buf;
+ struct sli4_fcp_128byte_wqe *trsp_128 = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE) {
+ trsp->class_ag_byte |= SLI4_TRSP_WQE_AG;
+ } else {
+ struct sli4_sge *sge = sgl->virt;
+ struct sli4_bde *bptr;
+
+ if (sli4->params.sgl_pre_registered || port_owned)
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_DBDE;
+ else
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_XBL;
+ bptr = &trsp->bde;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[0].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+
+ trsp->fcp_response_length = cpu_to_le32(params->xmit_len);
+ }
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_XC;
+
+ trsp->xri_tag = cpu_to_le16(params->xri);
+ trsp->rpi = cpu_to_le16(params->rpi);
+
+ trsp->command = SLI4_WQE_FCP_TRSP64;
+ trsp->class_ag_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ trsp->remote_xid = cpu_to_le16(params->ox_id);
+ trsp->request_tag = cpu_to_le16(params->tag);
+ if (params->flags & SLI4_IO_DNRX)
+ trsp->ct_dnrx_byte |= SLI4_TRSP_WQE_DNRX;
+ else
+ trsp->ct_dnrx_byte &= ~SLI4_TRSP_WQE_DNRX;
+
+ trsp->lloc1_appid |= 0x1;
+ trsp->cq_id = cpu_to_le16(cq_id);
+ trsp->cmd_type_byte = SLI4_CMD_FCP_TRSP64_WQE;
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ trsp->eat_xc_ccpe |= SLI4_TRSP_WQE_CCPE;
+ trsp->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(trsp->eat_xc_ccpe & SLI4_TRSP_WQE_EAT)) {
+ trsp->lloc1_appid |= SLI4_TRSP_WQE_APPID;
+ trsp->qosd_xbl_hlm_dbde_wqes |= SLI4_TRSP_WQE_WQES;
+ trsp_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
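+/*
+ * Build an FCP_TSEND64 work queue entry for sending data in target mode.
+ * The first two SGEs are skipped, so the data BDE (when one is used) is
+ * taken from sge[2]; otherwise the SGL is referenced through a BLP.
+ */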
+int
+sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params)
+{
+ struct sli4_fcp_tsend64_wqe *tsend = buf;
+ struct sli4_fcp_128byte_wqe *tsend_128 = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+
+ bptr = &tsend->bde;
+ if (sli4->params.sgl_pre_registered) {
+ tsend->ll_qd_xbl_hlm_iod_dbde &= ~SLI4_TSEND_WQE_XBL;
+
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+
+ /* TSEND64_WQE specifies that the first two SGEs are skipped
+ * (the 3rd is valid)
+ */
+ bptr->u.data.low = sge[2].buffer_address_low;
+ bptr->u.data.high = sge[2].buffer_address_high;
+ } else {
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_XBL;
+
+ /* if data is a single physical address, use a BDE */
+ if (!dif &&
+ params->xmit_len <= le32_to_cpu(sge[2].buffer_length)) {
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQE_DBDE;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[2].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ /*
+ * TSEND64_WQE specifies that the first two SGEs are
+ * skipped (i.e. the 3rd is valid)
+ */
+ bptr->u.data.low =
+ sge[2].buffer_address_low;
+ bptr->u.data.high =
+ sge[2].buffer_address_high;
+ } else {
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ (sgl->size &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+ }
+
+ tsend->relative_offset = cpu_to_le32(params->offset);
+
+ if (params->flags & SLI4_IO_CONTINUATION)
+ tsend->dw10byte2 |= SLI4_TSEND_XC;
+
+ tsend->xri_tag = cpu_to_le16(params->xri);
+
+ tsend->rpi = cpu_to_le16(params->rpi);
+ /* WQE uses relative offset */
+ tsend->class_pu_ar_byte |= 1 << SLI4_TSEND_WQE_PU_SHFT;
+
+ if (params->flags & SLI4_IO_AUTO_GOOD_RESPONSE)
+ tsend->class_pu_ar_byte |= SLI4_TSEND_WQE_AR;
+
+ tsend->command = SLI4_WQE_FCP_TSEND64;
+ tsend->class_pu_ar_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+ tsend->ct_byte |= SLI4_GENERIC_CONTEXT_RPI << SLI4_TSEND_CT_SHFT;
+ tsend->ct_byte |= dif;
+ tsend->ct_byte |= bs << SLI4_TSEND_BS_SHFT;
+
+ tsend->remote_xid = cpu_to_le16(params->ox_id);
+
+ tsend->request_tag = cpu_to_le16(params->tag);
+
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_LEN_LOC_BIT2;
+
+ tsend->cq_id = cpu_to_le16(cq_id);
+
+ tsend->cmd_type_byte |= SLI4_CMD_FCP_TSEND64_WQE;
+
+ tsend->fcp_data_transmit_length = cpu_to_le32(params->xmit_len);
+
+ if (sli4->params.perf_hint) {
+ bptr = &tsend->first_data_bde;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (le32_to_cpu(sge[first_data_sge].buffer_length) &
+ SLI4_BDE_LEN_MASK));
+ bptr->u.data.low =
+ sge[first_data_sge].buffer_address_low;
+ bptr->u.data.high =
+ sge[first_data_sge].buffer_address_high;
+ }
+
+ /* The upper 7 bits of csctl is the priority */
+ if (params->cs_ctl & SLI4_MASK_CCP) {
+ tsend->dw10byte2 |= SLI4_TSEND_CCPE;
+ tsend->ccp = (params->cs_ctl & SLI4_MASK_CCP);
+ }
+
+ if (params->app_id && sli4->wqe_size == SLI4_WQE_EXT_BYTES &&
+ !(tsend->dw10byte2 & SLI4_TSEND_EAT)) {
+ tsend->dw10byte0 |= SLI4_TSEND_APPID_VALID;
+ tsend->ll_qd_xbl_hlm_iod_dbde |= SLI4_TSEND_WQES;
+ tsend_128->dw[31] = params->app_id;
+ }
+ return 0;
+}
+
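+/*
+ * Build a GEN_REQUEST64 work queue entry, used for CT (generic services)
+ * requests; the payload is described by a data BDE or a BLP just like the
+ * ELS request path.
+ */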
+int
+sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_ct_params *params)
+{
+ struct sli4_gen_request64_wqe *gen = buf;
+ struct sli4_sge *sge = NULL;
+ struct sli4_bde *bptr;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!sgl || !sgl->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ sgl, sgl ? sgl->virt : NULL);
+ return -EIO;
+ }
+ sge = sgl->virt;
+ bptr = &gen->bde;
+
+ if (sli4->params.sgl_pre_registered) {
+ gen->dw10flags1 &= ~SLI4_GEN_REQ64_WQE_XBL;
+
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_DBDE;
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->xmit_len & SLI4_BDE_LEN_MASK));
+
+ bptr->u.data.low = sge[0].buffer_address_low;
+ bptr->u.data.high = sge[0].buffer_address_high;
+ } else {
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_XBL;
+
+ bptr->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(BLP)) |
+ ((2 * sizeof(struct sli4_sge)) &
+ SLI4_BDE_LEN_MASK));
+
+ bptr->u.blp.low =
+ cpu_to_le32(lower_32_bits(sgl->phys));
+ bptr->u.blp.high =
+ cpu_to_le32(upper_32_bits(sgl->phys));
+ }
+
+ gen->request_payload_length = cpu_to_le32(params->xmit_len);
+ gen->max_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ gen->df_ctl = params->df_ctl;
+ gen->type = params->type;
+ gen->r_ctl = params->r_ctl;
+
+ gen->xri_tag = cpu_to_le16(params->xri);
+
+ gen->ct_byte = SLI4_GENERIC_CONTEXT_RPI << SLI4_GEN_REQ64_CT_SHFT;
+ gen->context_tag = cpu_to_le16(params->rpi);
+
+ gen->class_byte = SLI4_GENERIC_CLASS_CLASS_3;
+
+ gen->command = SLI4_WQE_GEN_REQUEST64;
+
+ gen->timer = params->timeout;
+
+ gen->request_tag = cpu_to_le16(params->tag);
+
+ gen->dw10flags1 |= SLI4_GEN_REQ64_WQE_IOD;
+
+ gen->dw10flags0 |= SLI4_GEN_REQ64_WQE_QOSD;
+
+ gen->cmd_type_byte = SLI4_CMD_GEN_REQUEST64_WQE;
+
+ gen->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ return 0;
+}
+
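+/*
+ * Build a SEND_FRAME work queue entry: the raw FC header is copied into the
+ * WQE itself and the frame payload is described by a single data BDE.
+ */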
+int
+sli_send_frame_wqe(struct sli4 *sli, void *buf, u8 sof, u8 eof, u32 *hdr,
+ struct efc_dma *payload, u32 req_len, u8 timeout, u16 xri,
+ u16 req_tag)
+{
+ struct sli4_send_frame_wqe *sf = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ sf->dw10flags1 |= SLI4_SF_WQE_DBDE;
+ sf->bde.bde_type_buflen = cpu_to_le32(req_len &
+ SLI4_BDE_LEN_MASK);
+ sf->bde.u.data.low = cpu_to_le32(lower_32_bits(payload->phys));
+ sf->bde.u.data.high = cpu_to_le32(upper_32_bits(payload->phys));
+
+ /* Copy FC header */
+ sf->fc_header_0_1[0] = cpu_to_le32(hdr[0]);
+ sf->fc_header_0_1[1] = cpu_to_le32(hdr[1]);
+ sf->fc_header_2_5[0] = cpu_to_le32(hdr[2]);
+ sf->fc_header_2_5[1] = cpu_to_le32(hdr[3]);
+ sf->fc_header_2_5[2] = cpu_to_le32(hdr[4]);
+ sf->fc_header_2_5[3] = cpu_to_le32(hdr[5]);
+
+ sf->frame_length = cpu_to_le32(req_len);
+
+ sf->xri_tag = cpu_to_le16(xri);
+ sf->dw7flags0 &= ~SLI4_SF_PU;
+ sf->context_tag = 0;
+
+ sf->ct_byte &= ~SLI4_SF_CT;
+ sf->command = SLI4_WQE_SEND_FRAME;
+ sf->dw7flags0 |= SLI4_GENERIC_CLASS_CLASS_3;
+ sf->timer = timeout;
+
+ sf->request_tag = cpu_to_le16(req_tag);
+ sf->eof = eof;
+ sf->sof = sof;
+
+ sf->dw10flags1 &= ~SLI4_SF_QOSD;
+ sf->dw10flags0 |= SLI4_SF_LEN_LOC_BIT1;
+ sf->dw10flags2 &= ~SLI4_SF_XC;
+
+ sf->dw10flags1 |= SLI4_SF_XBL;
+
+ sf->cmd_type_byte |= SLI4_CMD_SEND_FRAME_WQE;
+ sf->cq_id = cpu_to_le16(0xffff);
+
+ return 0;
+}
+
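+/*
+ * Build an XMIT_BLS_RSP work queue entry carrying either a BLS accept or a
+ * BLS reject payload, addressed through the RPI or VPI context.
+ */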
+int
+sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
+ struct sli_bls_payload *payload,
+ struct sli_bls_params *params)
+{
+ struct sli4_xmit_bls_rsp_wqe *bls = buf;
+ u32 dw_ridflags = 0;
+
+ /*
+ * Callers can either specify RPI or S_ID, but not both
+ */
+ if (params->rpi_registered && params->s_id != U32_MAX) {
+ efc_log_info(sli, "S_ID specified for attached remote node %d\n",
+ params->rpi);
+ return -EIO;
+ }
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (payload->type == SLI4_SLI_BLS_ACC) {
+ bls->payload_word0 =
+ cpu_to_le32((payload->u.acc.seq_id_last << 16) |
+ (payload->u.acc.seq_id_validity << 24));
+ bls->high_seq_cnt = payload->u.acc.high_seq_cnt;
+ bls->low_seq_cnt = payload->u.acc.low_seq_cnt;
+ } else if (payload->type == SLI4_SLI_BLS_RJT) {
+ bls->payload_word0 =
+ cpu_to_le32(*((u32 *)&payload->u.rjt));
+ dw_ridflags |= SLI4_BLS_RSP_WQE_AR;
+ } else {
+ efc_log_info(sli, "bad BLS type %#x\n", payload->type);
+ return -EIO;
+ }
+
+ bls->ox_id = payload->ox_id;
+ bls->rx_id = payload->rx_id;
+
+ if (params->rpi_registered) {
+ bls->dw8flags0 |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_BLS_RSP_WQE_CT_SHFT;
+ bls->context_tag = cpu_to_le16(params->rpi);
+ } else {
+ bls->dw8flags0 |=
+ SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
+ bls->context_tag = cpu_to_le16(params->vpi);
+
+ if (params->s_id != U32_MAX)
+ bls->local_n_port_id_dword |=
+ cpu_to_le32(params->s_id & 0x00ffffff);
+
+ dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
+ (params->d_id & SLI4_BLS_RSP_RID);
+
+ bls->temporary_rpi = cpu_to_le16(params->rpi);
+ }
+
+ bls->xri_tag = cpu_to_le16(params->xri);
+
+ bls->dw8flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ bls->command = SLI4_WQE_XMIT_BLS_RSP;
+
+ bls->request_tag = cpu_to_le16(params->tag);
+
+ bls->dw11flags1 |= SLI4_BLS_RSP_WQE_QOSD;
+
+ bls->remote_id_dword = cpu_to_le32(dw_ridflags);
+ bls->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ bls->dw12flags0 |= SLI4_CMD_XMIT_BLS_RSP64_WQE;
+
+ return 0;
+}
+
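+/*
+ * Build an XMIT_ELS_RSP64 work queue entry to transmit an ELS response
+ * described by a single data BDE.
+ */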
+int
+sli_xmit_els_rsp64_wqe(struct sli4 *sli, void *buf, struct efc_dma *rsp,
+ struct sli_els_params *params)
+{
+ struct sli4_xmit_els_rsp64_wqe *els = buf;
+
+ memset(buf, 0, sli->wqe_size);
+
+ if (sli->params.sgl_pre_registered)
+ els->flags2 |= SLI4_ELS_DBDE;
+ else
+ els->flags2 |= SLI4_ELS_XBL;
+
+ els->els_response_payload.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->rsp_len & SLI4_BDE_LEN_MASK));
+ els->els_response_payload.u.data.low =
+ cpu_to_le32(lower_32_bits(rsp->phys));
+ els->els_response_payload.u.data.high =
+ cpu_to_le32(upper_32_bits(rsp->phys));
+
+ els->els_response_payload_length = cpu_to_le32(params->rsp_len);
+
+ els->xri_tag = cpu_to_le16(params->xri);
+
+ els->class_byte |= SLI4_GENERIC_CLASS_CLASS_3;
+
+ els->command = SLI4_WQE_ELS_RSP64;
+
+ els->request_tag = cpu_to_le16(params->tag);
+
+ els->ox_id = cpu_to_le16(params->ox_id);
+
+ els->flags2 |= SLI4_ELS_IOD & SLI4_ELS_REQUEST64_DIR_WRITE;
+
+ els->flags2 |= SLI4_ELS_QOSD;
+
+ els->cmd_type_wqec = SLI4_ELS_REQUEST64_CMD_GEN;
+
+ els->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
+
+ if (params->rpi_registered) {
+ els->ct_byte |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_ELS_CT_OFFSET;
+ els->context_tag = cpu_to_le16(params->rpi);
+ return 0;
+ }
+
+ els->ct_byte |= SLI4_GENERIC_CONTEXT_VPI << SLI4_ELS_CT_OFFSET;
+ els->context_tag = cpu_to_le16(params->vpi);
+ els->rid_dw = cpu_to_le32(params->d_id & SLI4_ELS_RID);
+ els->temporary_rpi = cpu_to_le16(params->rpi);
+ if (params->s_id != U32_MAX) {
+ els->sid_dw |=
+ cpu_to_le32(SLI4_ELS_SP | (params->s_id & SLI4_ELS_SID));
+ }
+
+ return 0;
+}
+
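+/*
+ * Build an XMIT_SEQUENCE64 work queue entry used to transmit a single
+ * sequence (for example a CT command payload) described by one data BDE.
+ */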
+int
+sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
+ struct sli_ct_params *params)
+{
+ struct sli4_xmit_sequence64_wqe *xmit = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ if (!payload || !payload->virt) {
+ efc_log_err(sli4, "bad parameter sgl=%p virt=%p\n",
+ payload, payload ? payload->virt : NULL);
+ return -EIO;
+ }
+
+ if (sli4->params.sgl_pre_registered)
+ xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_DBDE);
+ else
+ xmit->dw10w0 |= cpu_to_le16(SLI4_SEQ_WQE_XBL);
+
+ xmit->bde.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (params->rsp_len & SLI4_BDE_LEN_MASK));
+ xmit->bde.u.data.low =
+ cpu_to_le32(lower_32_bits(payload->phys));
+ xmit->bde.u.data.high =
+ cpu_to_le32(upper_32_bits(payload->phys));
+ xmit->sequence_payload_len = cpu_to_le32(params->rsp_len);
+
+ xmit->remote_n_port_id_dword |= cpu_to_le32(params->d_id & 0x00ffffff);
+
+ xmit->relative_offset = 0;
+
+ /* sequence initiative - this matches what is seen from
+ * FC switches in response to FCGS commands
+ */
+ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_SI);
+ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_FT); /* force transmit */
+ xmit->dw5flags0 &= (~SLI4_SEQ_WQE_XO); /* exchange responder */
+ xmit->dw5flags0 |= SLI4_SEQ_WQE_LS; /* last in sequence */
+ xmit->df_ctl = params->df_ctl;
+ xmit->type = params->type;
+ xmit->r_ctl = params->r_ctl;
+
+ xmit->xri_tag = cpu_to_le16(params->xri);
+ xmit->context_tag = cpu_to_le16(params->rpi);
+
+ xmit->dw7flags0 &= ~SLI4_SEQ_WQE_DIF;
+ xmit->dw7flags0 |=
+ SLI4_GENERIC_CONTEXT_RPI << SLI4_SEQ_WQE_CT_SHIFT;
+ xmit->dw7flags0 &= ~SLI4_SEQ_WQE_BS;
+
+ xmit->command = SLI4_WQE_XMIT_SEQUENCE64;
+ xmit->dw7flags1 |= SLI4_GENERIC_CLASS_CLASS_3;
+ xmit->dw7flags1 &= ~SLI4_SEQ_WQE_PU;
+ xmit->timer = params->timeout;
+
+ xmit->abort_tag = 0;
+ xmit->request_tag = cpu_to_le16(params->tag);
+ xmit->remote_xid = cpu_to_le16(params->ox_id);
+
+ xmit->dw10w0 |=
+ cpu_to_le16(SLI4_ELS_REQUEST64_DIR_READ << SLI4_SEQ_WQE_IOD_SHIFT);
+
+ xmit->cmd_type_wqec_byte |= SLI4_CMD_XMIT_SEQUENCE64_WQE;
+
+ xmit->dw10w0 |= cpu_to_le16(2 << SLI4_SEQ_WQE_LEN_LOC_SHIFT);
+
+ xmit->cq_id = cpu_to_le16(0xFFFF);
+
+ return 0;
+}
+
+int
+sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id)
+{
+ struct sli4_requeue_xri_wqe *requeue = buf;
+
+ memset(buf, 0, sli4->wqe_size);
+
+ requeue->command = SLI4_WQE_REQUEUE_XRI;
+ requeue->xri_tag = cpu_to_le16(xri);
+ requeue->request_tag = cpu_to_le16(tag);
+ requeue->flags2 |= cpu_to_le16(SLI4_REQU_XRI_WQE_XC);
+ requeue->flags1 |= cpu_to_le16(SLI4_REQU_XRI_WQE_QOSD);
+ requeue->cq_id = cpu_to_le16(cq_id);
+ requeue->cmd_type_wqec_byte = SLI4_CMD_REQUEUE_XRI_WQE;
+ return 0;
+}
+
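+/*
+ * Translate an FC link attention ACQE into a generic link event (status,
+ * topology and speed) and hand it to the registered link callback.
+ */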
+int
+sli_fc_process_link_attention(struct sli4 *sli4, void *acqe)
+{
+ struct sli4_link_attention *link_attn = acqe;
+ struct sli4_link_event event = { 0 };
+
+ efc_log_info(sli4, "link=%d attn_type=%#x top=%#x speed=%#x pfault=%#x\n",
+ link_attn->link_number, link_attn->attn_type,
+ link_attn->topology, link_attn->port_speed,
+ link_attn->port_fault);
+ efc_log_info(sli4, "shared_lnk_status=%#x logl_lnk_speed=%#x evttag=%#x\n",
+ link_attn->shared_link_status,
+ le16_to_cpu(link_attn->logical_link_speed),
+ le32_to_cpu(link_attn->event_tag));
+
+ if (!sli4->link)
+ return -EIO;
+
+ event.medium = SLI4_LINK_MEDIUM_FC;
+
+ switch (link_attn->attn_type) {
+ case SLI4_LNK_ATTN_TYPE_LINK_UP:
+ event.status = SLI4_LINK_STATUS_UP;
+ break;
+ case SLI4_LNK_ATTN_TYPE_LINK_DOWN:
+ event.status = SLI4_LINK_STATUS_DOWN;
+ break;
+ case SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA:
+ efc_log_info(sli4, "attn_type: no hard alpa\n");
+ event.status = SLI4_LINK_STATUS_NO_ALPA;
+ break;
+ default:
+ efc_log_info(sli4, "attn_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->event_type) {
+ case SLI4_EVENT_LINK_ATTENTION:
+ break;
+ case SLI4_EVENT_SHARED_LINK_ATTENTION:
+ efc_log_info(sli4, "event_type: FC shared link event\n");
+ break;
+ default:
+ efc_log_info(sli4, "event_type: unknown\n");
+ break;
+ }
+
+ switch (link_attn->topology) {
+ case SLI4_LNK_ATTN_P2P:
+ event.topology = SLI4_LINK_TOPO_NON_FC_AL;
+ break;
+ case SLI4_LNK_ATTN_FC_AL:
+ event.topology = SLI4_LINK_TOPO_FC_AL;
+ break;
+ case SLI4_LNK_ATTN_INTERNAL_LOOPBACK:
+ efc_log_info(sli4, "topology Internal loopback\n");
+ event.topology = SLI4_LINK_TOPO_LOOPBACK_INTERNAL;
+ break;
+ case SLI4_LNK_ATTN_SERDES_LOOPBACK:
+ efc_log_info(sli4, "topology serdes loopback\n");
+ event.topology = SLI4_LINK_TOPO_LOOPBACK_EXTERNAL;
+ break;
+ default:
+ efc_log_info(sli4, "topology: unknown\n");
+ break;
+ }
+
+ event.speed = link_attn->port_speed * 1000;
+
+ sli4->link(sli4->link_arg, (void *)&event);
+
+ return 0;
+}
+
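+/*
+ * Translate an FC completion queue entry into a generic entry type and
+ * resource ID (request tag, RQ ID, XRI or WQ ID depending on the CQE code),
+ * logging the details of any failed work completions.
+ */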
+int
+sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
+ u8 *cqe, enum sli4_qentry *etype, u16 *r_id)
+{
+ u8 code = cqe[SLI4_CQE_CODE_OFFSET];
+ int rc;
+
+ switch (code) {
+ case SLI4_CQE_CODE_WORK_REQUEST_COMPLETION:
+ {
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_WQ;
+ *r_id = le16_to_cpu(wcqe->request_tag);
+ rc = wcqe->status;
+
+ /* Flag errors except for FCP_RSP_FAILURE */
+ if (rc && rc != SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE) {
+ efc_log_info(sli4, "WCQE: status=%#x hw_status=%#x tag=%#x\n",
+ wcqe->status, wcqe->hw_status,
+ le16_to_cpu(wcqe->request_tag));
+ efc_log_info(sli4, "w1=%#x w2=%#x xb=%d\n",
+ le32_to_cpu(wcqe->wqe_specific_1),
+ le32_to_cpu(wcqe->wqe_specific_2),
+ (wcqe->flags & SLI4_WCQE_XB));
+ efc_log_info(sli4, " %08X %08X %08X %08X\n",
+ ((u32 *)cqe)[0], ((u32 *)cqe)[1],
+ ((u32 *)cqe)[2], ((u32 *)cqe)[3]);
+ }
+
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC:
+ {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1:
+ {
+ struct sli4_fc_async_rcqe_v1 *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->rq_id);
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD:
+ {
+ struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_OPT_WRITE_CMD;
+ *r_id = le16_to_cpu(optcqe->rq_id);
+ rc = optcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA:
+ {
+ struct sli4_fc_optimized_write_data_cqe *dcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_OPT_WRITE_DATA;
+ *r_id = le16_to_cpu(dcqe->xri);
+ rc = dcqe->status;
+
+ /* Flag errors */
+ if (rc != SLI4_FC_WCQE_STATUS_SUCCESS) {
+ efc_log_info(sli4, "Optimized DATA CQE: status=%#x\n",
+ dcqe->status);
+ efc_log_info(sli4, "hstat=%#x xri=%#x dpl=%#x w3=%#x xb=%d\n",
+ dcqe->hw_status, le16_to_cpu(dcqe->xri),
+ le32_to_cpu(dcqe->total_data_placed),
+ ((u32 *)cqe)[3],
+ (dcqe->flags & SLI4_OCQE_XB));
+ }
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_COALESCING:
+ {
+ struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
+
+ *etype = SLI4_QENTRY_RQ;
+ *r_id = le16_to_cpu(rcqe->rq_id);
+ rc = rcqe->status;
+ break;
+ }
+ case SLI4_CQE_CODE_XRI_ABORTED:
+ {
+ struct sli4_fc_xri_aborted_cqe *xa = (void *)cqe;
+
+ *etype = SLI4_QENTRY_XABT;
+ *r_id = le16_to_cpu(xa->xri);
+ rc = 0;
+ break;
+ }
+ case SLI4_CQE_CODE_RELEASE_WQE:
+ {
+ struct sli4_fc_wqec *wqec = (void *)cqe;
+
+ *etype = SLI4_QENTRY_WQ_RELEASE;
+ *r_id = le16_to_cpu(wqec->wq_id);
+ rc = 0;
+ break;
+ }
+ default:
+ efc_log_info(sli4, "CQE completion code %d not handled\n",
+ code);
+ *etype = SLI4_QENTRY_MAX;
+ *r_id = U16_MAX;
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+u32
+sli_fc_response_length(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ return le32_to_cpu(wcqe->wqe_specific_1);
+}
+
+u32
+sli_fc_io_length(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ return le32_to_cpu(wcqe->wqe_specific_1);
+}
+
+int
+sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+
+ *d_id = 0;
+
+ if (wcqe->status)
+ return -EIO;
+ *d_id = le32_to_cpu(wcqe->wqe_specific_2) & 0x00ffffff;
+ return 0;
+}
+
+u32
+sli_fc_ext_status(struct sli4 *sli4, u8 *cqe)
+{
+ struct sli4_fc_wcqe *wcqe = (void *)cqe;
+ u32 mask;
+
+ switch (wcqe->status) {
+ case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
+ mask = U32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
+ case SLI4_FC_WCQE_STATUS_CMD_REJECT:
+ mask = 0xff;
+ break;
+ case SLI4_FC_WCQE_STATUS_NPORT_RJT:
+ case SLI4_FC_WCQE_STATUS_FABRIC_RJT:
+ case SLI4_FC_WCQE_STATUS_NPORT_BSY:
+ case SLI4_FC_WCQE_STATUS_FABRIC_BSY:
+ case SLI4_FC_WCQE_STATUS_LS_RJT:
+ mask = U32_MAX;
+ break;
+ case SLI4_FC_WCQE_STATUS_DI_ERROR:
+ mask = U32_MAX;
+ break;
+ default:
+ mask = 0;
+ }
+
+ return le32_to_cpu(wcqe->wqe_specific_2) & mask;
+}
+
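+/*
+ * Extract the receive queue ID and element index from an RQ completion,
+ * handling the v0/v1 async, optimized-write and coalescing CQE formats.
+ */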
+int
+sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index)
+{
+ int rc = -EIO;
+ u8 code = 0;
+ u16 rq_element_index;
+
+ *rq_id = 0;
+ *index = U32_MAX;
+
+ code = cqe[SLI4_CQE_CODE_OFFSET];
+
+ /* Retrieve the RQ index from the completion */
+ if (code == SLI4_CQE_CODE_RQ_ASYNC) {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *rq_id = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID;
+ rq_element_index =
+ le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX;
+ *index = rq_element_index;
+ if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = rcqe->status;
+ efc_log_info(sli4, "status=%02x (%s) rq_id=%d\n",
+ rcqe->status,
+ sli_fc_get_status_string(rcqe->status),
+ le16_to_cpu(rcqe->fcfi_rq_id_word) &
+ SLI4_RACQE_RQ_ID);
+
+ efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ le16_to_cpu(rcqe->data_placement_length),
+ rcqe->sof_byte, rcqe->eof_byte,
+ rcqe->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_ASYNC_V1) {
+ struct sli4_fc_async_rcqe_v1 *rcqe_v1 = (void *)cqe;
+
+ *rq_id = le16_to_cpu(rcqe_v1->rq_id);
+ rq_element_index =
+ (le16_to_cpu(rcqe_v1->rq_elmt_indx_word) &
+ SLI4_RACQE_RQ_EL_INDX);
+ *index = rq_element_index;
+ if (rcqe_v1->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = rcqe_v1->status;
+ efc_log_info(sli4, "status=%02x (%s) rq_id=%d, index=%x\n",
+ rcqe_v1->status,
+ sli_fc_get_status_string(rcqe_v1->status),
+ le16_to_cpu(rcqe_v1->rq_id), rq_element_index);
+
+ efc_log_info(sli4, "pdpl=%x sof=%02x eof=%02x hdpl=%x\n",
+ le16_to_cpu(rcqe_v1->data_placement_length),
+ rcqe_v1->sof_byte, rcqe_v1->eof_byte,
+ rcqe_v1->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+ } else if (code == SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD) {
+ struct sli4_fc_optimized_write_cmd_cqe *optcqe = (void *)cqe;
+
+ *rq_id = le16_to_cpu(optcqe->rq_id);
+ *index = le16_to_cpu(optcqe->w1) & SLI4_OCQE_RQ_EL_INDX;
+ if (optcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ rc = 0;
+ } else {
+ rc = optcqe->status;
+ efc_log_info(sli4, "stat=%02x (%s) rqid=%d, idx=%x pdpl=%x\n",
+ optcqe->status,
+ sli_fc_get_status_string(optcqe->status),
+ le16_to_cpu(optcqe->rq_id), *index,
+ le16_to_cpu(optcqe->data_placement_length));
+
+ efc_log_info(sli4, "hdpl=%x oox=%d agxr=%d xri=0x%x rpi=%x\n",
+ (optcqe->hdpl_vld & SLI4_OCQE_HDPL),
+ (optcqe->flags1 & SLI4_OCQE_OOX),
+ (optcqe->flags1 & SLI4_OCQE_AGXR),
+ optcqe->xri, le16_to_cpu(optcqe->rpi));
+ }
+ } else if (code == SLI4_CQE_CODE_RQ_COALESCING) {
+ struct sli4_fc_coalescing_rcqe *rcqe = (void *)cqe;
+
+ rq_element_index = (le16_to_cpu(rcqe->rq_elmt_indx_word) &
+ SLI4_RCQE_RQ_EL_INDX);
+
+ *rq_id = le16_to_cpu(rcqe->rq_id);
+ if (rcqe->status == SLI4_FC_COALESCE_RQ_SUCCESS) {
+ *index = rq_element_index;
+ rc = 0;
+ } else {
+ *index = U32_MAX;
+ rc = rcqe->status;
+
+ efc_log_info(sli4, "stat=%02x (%s) rq_id=%d, idx=%x\n",
+ rcqe->status,
+ sli_fc_get_status_string(rcqe->status),
+ le16_to_cpu(rcqe->rq_id), rq_element_index);
+ efc_log_info(sli4, "rq_id=%#x sdpl=%x\n",
+ le16_to_cpu(rcqe->rq_id),
+ le16_to_cpu(rcqe->seq_placement_length));
+ }
+ } else {
+ struct sli4_fc_async_rcqe *rcqe = (void *)cqe;
+
+ *index = U32_MAX;
+ rc = rcqe->status;
+
+ efc_log_info(sli4, "status=%02x rq_id=%d, index=%x pdpl=%x\n",
+ rcqe->status,
+ le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID,
+ (le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX),
+ le16_to_cpu(rcqe->data_placement_length));
+ efc_log_info(sli4, "sof=%02x eof=%02x hdpl=%x\n",
+ rcqe->sof_byte, rcqe->eof_byte,
+ rcqe->hdpl_byte & SLI4_RACQE_HDPL);
+ }
+
+ return rc;
+}
+
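+/*
+ * Bootstrap mailbox helpers: a command is issued by writing the high and
+ * then the low half of the mailbox buffer's physical address to the BMBX
+ * register, polling for the ready bit after each write. The completion MCQE
+ * is written by the port directly after the command in the same buffer.
+ */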
+static int
+sli_bmbx_wait(struct sli4 *sli4, u32 msec)
+{
+ u32 val;
+ unsigned long end;
+
+ /* Wait for the bootstrap mailbox to report "ready" */
+ end = jiffies + msecs_to_jiffies(msec);
+ do {
+ val = readl(sli4->reg[0] + SLI4_BMBX_REG);
+ if (val & SLI4_BMBX_RDY)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ return -EIO;
+}
+
+static int
+sli_bmbx_write(struct sli4 *sli4)
+{
+ u32 val;
+
+ /* write buffer location to bootstrap mailbox register */
+ val = sli_bmbx_write_hi(sli4->bmbx.phys);
+ writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
+
+ if (sli_bmbx_wait(sli4, SLI4_BMBX_DELAY_US)) {
+ efc_log_crit(sli4, "BMBX WRITE_HI failed\n");
+ return -EIO;
+ }
+ val = sli_bmbx_write_lo(sli4->bmbx.phys);
+ writel(val, (sli4->reg[0] + SLI4_BMBX_REG));
+
+ /* wait for SLI Port to set ready bit */
+ return sli_bmbx_wait(sli4, SLI4_BMBX_TIMEOUT_MSEC);
+}
+
+int
+sli_bmbx_command(struct sli4 *sli4)
+{
+ void *cqe = (u8 *)sli4->bmbx.virt + SLI4_BMBX_SIZE;
+
+ if (sli_fw_error_status(sli4) > 0) {
+ efc_log_crit(sli4, "Chip is in an error state -Mailbox command rejected");
+ efc_log_crit(sli4, " status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(sli4),
+ sli_reg_read_err1(sli4),
+ sli_reg_read_err2(sli4));
+ return -EIO;
+ }
+
+ /* Submit a command to the bootstrap mailbox and check the status */
+ if (sli_bmbx_write(sli4)) {
+ efc_log_crit(sli4, "bmbx write fail phys=%pad reg=%#x\n",
+ &sli4->bmbx.phys, readl(sli4->reg[0] + SLI4_BMBX_REG));
+ return -EIO;
+ }
+
+ /* check completion queue entry status */
+ if (le32_to_cpu(((struct sli4_mcqe *)cqe)->dw3_flags) &
+ SLI4_MCQE_VALID) {
+ return sli_cqe_mq(sli4, cqe);
+ }
+ efc_log_crit(sli4, "invalid or wrong type\n");
+ return -EIO;
+}
+
+int
+sli_cmd_config_link(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_config_link *config_link = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ config_link->hdr.command = SLI4_MBX_CMD_CONFIG_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return 0;
+}
+
+int
+sli_cmd_down_link(struct sli4 *sli4, void *buf)
+{
+ struct sli4_mbox_command_header *hdr = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ hdr->command = SLI4_MBX_CMD_DOWN_LINK;
+
+ /* Port interprets zero in a field as "use default value" */
+
+ return 0;
+}
+
+int
+sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki)
+{
+ struct sli4_cmd_dump4 *cmd = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_DUMP;
+ cmd->type_dword = cpu_to_le32(0x4);
+ cmd->wki_selection = cpu_to_le16(wki);
+ return 0;
+}
+
+int
+sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf, u32 page_num,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_read_transceiver_data *req = NULL;
+ u32 psize;
+
+ if (!dma)
+ psize = SLI4_CFG_PYLD_LENGTH(cmn_read_transceiver_data);
+ else
+ psize = dma->size;
+
+ req = sli_config_cmd_init(sli4, buf, psize, dma);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_READ_TRANS_DATA,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_read_transceiver_data));
+
+ req->page_number = cpu_to_le32(page_num);
+ req->port = cpu_to_le32(sli4->port_number);
+
+ return 0;
+}
+
+int
+sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_ext_counters,
+ u8 clear_overflow_flags,
+ u8 clear_all_counters)
+{
+ struct sli4_cmd_read_link_stats *cmd = buf;
+ u32 flags;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_READ_LNK_STAT;
+
+ flags = 0;
+ if (req_ext_counters)
+ flags |= SLI4_READ_LNKSTAT_REC;
+ if (clear_all_counters)
+ flags |= SLI4_READ_LNKSTAT_CLRC;
+ if (clear_overflow_flags)
+ flags |= SLI4_READ_LNKSTAT_CLOF;
+
+ cmd->dw1_flags = cpu_to_le32(flags);
+ return 0;
+}
+
+int
+sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear_counters)
+{
+ struct sli4_cmd_read_status *cmd = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ cmd->hdr.command = SLI4_MBX_CMD_READ_STATUS;
+ if (clear_counters)
+ flags |= SLI4_READSTATUS_CLEAR_COUNTERS;
+ else
+ flags &= ~SLI4_READSTATUS_CLEAR_COUNTERS;
+
+ cmd->dw1_flags = cpu_to_le32(flags);
+ return 0;
+}
+
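+/*
+ * Build an INIT_LINK mailbox command; fixed link speeds are validated
+ * against the configured topology (e.g. 16G/32G are rejected in FC-AL mode).
+ */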
+int
+sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed, u8 reset_alpa)
+{
+ struct sli4_cmd_init_link *init_link = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_link->hdr.command = SLI4_MBX_CMD_INIT_LINK;
+
+ init_link->sel_reset_al_pa_dword =
+ cpu_to_le32(reset_alpa);
+ flags &= ~SLI4_INIT_LINK_F_LOOPBACK;
+
+ init_link->link_speed_sel_code = cpu_to_le32(speed);
+ switch (speed) {
+ case SLI4_LINK_SPEED_1G:
+ case SLI4_LINK_SPEED_2G:
+ case SLI4_LINK_SPEED_4G:
+ case SLI4_LINK_SPEED_8G:
+ case SLI4_LINK_SPEED_16G:
+ case SLI4_LINK_SPEED_32G:
+ case SLI4_LINK_SPEED_64G:
+ flags |= SLI4_INIT_LINK_F_FIXED_SPEED;
+ break;
+ case SLI4_LINK_SPEED_10G:
+ efc_log_info(sli4, "unsupported FC speed %d\n", speed);
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ /* Attempt P2P but failover to FC-AL */
+ flags |= SLI4_INIT_LINK_F_FAIL_OVER;
+ flags |= SLI4_INIT_LINK_F_P2P_FAIL_OVER;
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ flags |= SLI4_INIT_LINK_F_FCAL_ONLY;
+ if (speed == SLI4_LINK_SPEED_16G ||
+ speed == SLI4_LINK_SPEED_32G) {
+ efc_log_info(sli4, "unsupported FC-AL speed %d\n",
+ speed);
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+ break;
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ flags |= SLI4_INIT_LINK_F_P2P_ONLY;
+ break;
+ default:
+
+ efc_log_info(sli4, "unsupported topology %#x\n", sli4->topology);
+
+ init_link->flags0 = cpu_to_le32(flags);
+ return -EIO;
+ }
+
+ flags &= ~SLI4_INIT_LINK_F_UNFAIR;
+ flags &= ~SLI4_INIT_LINK_F_NO_LIRP;
+ flags &= ~SLI4_INIT_LINK_F_LOOP_VALID_CHK;
+ flags &= ~SLI4_INIT_LINK_F_NO_LISA;
+ flags &= ~SLI4_INIT_LINK_F_PICK_HI_ALPA;
+ init_link->flags0 = cpu_to_le32(flags);
+
+ return 0;
+}
+
+int
+sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi, u16 vpi)
+{
+ struct sli4_cmd_init_vfi *init_vfi = buf;
+ u16 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_vfi->hdr.command = SLI4_MBX_CMD_INIT_VFI;
+ init_vfi->vfi = cpu_to_le16(vfi);
+ init_vfi->fcfi = cpu_to_le16(fcfi);
+
+ /*
+ * If the VPI is valid, initialize it at the same time as
+ * the VFI
+ */
+ if (vpi != U16_MAX) {
+ flags |= SLI4_INIT_VFI_FLAG_VP;
+ init_vfi->flags0_word = cpu_to_le16(flags);
+ init_vfi->vpi = cpu_to_le16(vpi);
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi)
+{
+ struct sli4_cmd_init_vpi *init_vpi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ init_vpi->hdr.command = SLI4_MBX_CMD_INIT_VPI;
+ init_vpi->vpi = cpu_to_le16(vpi);
+ init_vpi->vfi = cpu_to_le16(vfi);
+
+ return 0;
+}
+
+int
+sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 xri_base, u16 xri_count)
+{
+ struct sli4_cmd_post_xri *post_xri = buf;
+ u16 xri_count_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ post_xri->hdr.command = SLI4_MBX_CMD_POST_XRI;
+ post_xri->xri_base = cpu_to_le16(xri_base);
+ xri_count_flags = xri_count & SLI4_POST_XRI_COUNT;
+ xri_count_flags |= SLI4_POST_XRI_FLAG_ENX;
+ xri_count_flags |= SLI4_POST_XRI_FLAG_VAL;
+ post_xri->xri_count_flags = cpu_to_le16(xri_count_flags);
+
+ return 0;
+}
+
+int
+sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri)
+{
+ struct sli4_cmd_release_xri *release_xri = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ release_xri->hdr.command = SLI4_MBX_CMD_RELEASE_XRI;
+ release_xri->xri_count_word = cpu_to_le16(num_xri &
+ SLI4_RELEASE_XRI_COUNT);
+
+ return 0;
+}
+
+static int
+sli_cmd_read_config(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_read_config *read_config = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_config->hdr.command = SLI4_MBX_CMD_READ_CONFIG;
+
+ return 0;
+}
+
+int
+sli_cmd_read_nvparms(struct sli4 *sli4, void *buf)
+{
+ struct sli4_cmd_read_nvparms *read_nvparms = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_nvparms->hdr.command = SLI4_MBX_CMD_READ_NVPARMS;
+
+ return 0;
+}
+
+int
+sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn, u8 *wwnn,
+ u8 hard_alpa, u32 preferred_d_id)
+{
+ struct sli4_cmd_write_nvparms *write_nvparms = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ write_nvparms->hdr.command = SLI4_MBX_CMD_WRITE_NVPARMS;
+ memcpy(write_nvparms->wwpn, wwpn, 8);
+ memcpy(write_nvparms->wwnn, wwnn, 8);
+
+ write_nvparms->hard_alpa_d_id =
+ cpu_to_le32((preferred_d_id << 8) | hard_alpa);
+ return 0;
+}
+
+static int
+sli_cmd_read_rev(struct sli4 *sli4, void *buf, struct efc_dma *vpd)
+{
+ struct sli4_cmd_read_rev *read_rev = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_rev->hdr.command = SLI4_MBX_CMD_READ_REV;
+
+ if (vpd && vpd->size) {
+ read_rev->flags0_word |= cpu_to_le16(SLI4_READ_REV_FLAG_VPD);
+
+ read_rev->available_length_dword =
+ cpu_to_le32(vpd->size &
+ SLI4_READ_REV_AVAILABLE_LENGTH);
+
+ read_rev->hostbuf.low =
+ cpu_to_le32(lower_32_bits(vpd->phys));
+ read_rev->hostbuf.high =
+ cpu_to_le32(upper_32_bits(vpd->phys));
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi)
+{
+ struct sli4_cmd_read_sparm64 *read_sparm64 = buf;
+
+ if (vpi == U16_MAX) {
+ efc_log_err(sli4, "special VPI not supported!!!\n");
+ return -EIO;
+ }
+
+ if (!dma || !dma->phys) {
+ efc_log_err(sli4, "bad DMA buffer\n");
+ return -EIO;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_sparm64->hdr.command = SLI4_MBX_CMD_READ_SPARM64;
+
+ read_sparm64->bde_64.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (dma->size & SLI4_BDE_LEN_MASK));
+ read_sparm64->bde_64.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ read_sparm64->bde_64.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ read_sparm64->vpi = cpu_to_le16(vpi);
+
+ return 0;
+}
+
+int
+sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma)
+{
+ struct sli4_cmd_read_topology *read_topo = buf;
+
+ if (!dma || !dma->size)
+ return -EIO;
+
+ if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
+ efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size);
+ return -EIO;
+ }
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ read_topo->hdr.command = SLI4_MBX_CMD_READ_TOPOLOGY;
+
+ memset(dma->virt, 0, dma->size);
+
+ read_topo->bde_loop_map.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (dma->size & SLI4_BDE_LEN_MASK));
+ read_topo->bde_loop_map.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ read_topo->bde_loop_map.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ return 0;
+}
+
+int
+sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
+ struct sli4_cmd_rq_cfg *rq_cfg)
+{
+ struct sli4_cmd_reg_fcfi *reg_fcfi = buf;
+ u32 i;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_fcfi->hdr.command = SLI4_MBX_CMD_REG_FCFI;
+
+ reg_fcfi->fcf_index = cpu_to_le16(index);
+
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ switch (i) {
+ case 0:
+ reg_fcfi->rqid0 = rq_cfg[0].rq_id;
+ break;
+ case 1:
+ reg_fcfi->rqid1 = rq_cfg[1].rq_id;
+ break;
+ case 2:
+ reg_fcfi->rqid2 = rq_cfg[2].rq_id;
+ break;
+ case 3:
+ reg_fcfi->rqid3 = rq_cfg[3].rq_id;
+ break;
+ }
+ reg_fcfi->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi->rq_cfg[i].type_match = rq_cfg[i].type_match;
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 fcf_index,
+ u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
+ struct sli4_cmd_rq_cfg *rq_cfg)
+{
+ struct sli4_cmd_reg_fcfi_mrq *reg_fcfi_mrq = buf;
+ u32 i;
+ u32 mrq_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_fcfi_mrq->hdr.command = SLI4_MBX_CMD_REG_FCFI_MRQ;
+ if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
+ reg_fcfi_mrq->fcf_index = cpu_to_le16(fcf_index);
+ goto done;
+ }
+
+ reg_fcfi_mrq->dw8_vlan = cpu_to_le32(SLI4_REGFCFI_MRQ_MODE);
+
+ for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_mask = rq_cfg[i].r_ctl_mask;
+ reg_fcfi_mrq->rq_cfg[i].r_ctl_match = rq_cfg[i].r_ctl_match;
+ reg_fcfi_mrq->rq_cfg[i].type_mask = rq_cfg[i].type_mask;
+ reg_fcfi_mrq->rq_cfg[i].type_match = rq_cfg[i].type_match;
+
+ switch (i) {
+ case 3:
+ reg_fcfi_mrq->rqid3 = rq_cfg[i].rq_id;
+ break;
+ case 2:
+ reg_fcfi_mrq->rqid2 = rq_cfg[i].rq_id;
+ break;
+ case 1:
+ reg_fcfi_mrq->rqid1 = rq_cfg[i].rq_id;
+ break;
+ case 0:
+ reg_fcfi_mrq->rqid0 = rq_cfg[i].rq_id;
+ break;
+ }
+ }
+
+ mrq_flags = num_mrqs & SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS;
+ mrq_flags |= (mrq_bit_mask << 8);
+ mrq_flags |= (rq_selection_policy << 12);
+ reg_fcfi_mrq->dw9_mrqflags = cpu_to_le32(mrq_flags);
+done:
+ return 0;
+}
+
+int
+sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
+ struct efc_dma *dma, u8 update, u8 enable_t10_pi)
+{
+ struct sli4_cmd_reg_rpi *reg_rpi = buf;
+ u32 rportid_flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_rpi->hdr.command = SLI4_MBX_CMD_REG_RPI;
+
+ reg_rpi->rpi = cpu_to_le16(rpi);
+
+ rportid_flags = fc_id & SLI4_REGRPI_REMOTE_N_PORTID;
+
+ if (update)
+ rportid_flags |= SLI4_REGRPI_UPD;
+ else
+ rportid_flags &= ~SLI4_REGRPI_UPD;
+
+ if (enable_t10_pi)
+ rportid_flags |= SLI4_REGRPI_ETOW;
+ else
+ rportid_flags &= ~SLI4_REGRPI_ETOW;
+
+ reg_rpi->dw2_rportid_flags = cpu_to_le32(rportid_flags);
+
+ reg_rpi->bde_64.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
+ reg_rpi->bde_64.u.data.low =
+ cpu_to_le32(lower_32_bits(dma->phys));
+ reg_rpi->bde_64.u.data.high =
+ cpu_to_le32(upper_32_bits(dma->phys));
+
+ reg_rpi->vpi = cpu_to_le16(vpi);
+
+ return 0;
+}
+
+int
+sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
+ u16 vfi, u16 fcfi, struct efc_dma dma,
+ u16 vpi, __be64 sli_wwpn, u32 fc_id)
+{
+ struct sli4_cmd_reg_vfi *reg_vfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_vfi->hdr.command = SLI4_MBX_CMD_REG_VFI;
+
+ reg_vfi->vfi = cpu_to_le16(vfi);
+
+ reg_vfi->fcfi = cpu_to_le16(fcfi);
+
+ reg_vfi->sparm.bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (SLI4_REG_RPI_BUF_LEN & SLI4_BDE_LEN_MASK));
+ reg_vfi->sparm.u.data.low =
+ cpu_to_le32(lower_32_bits(dma.phys));
+ reg_vfi->sparm.u.data.high =
+ cpu_to_le32(upper_32_bits(dma.phys));
+
+ reg_vfi->e_d_tov = cpu_to_le32(sli4->e_d_tov);
+ reg_vfi->r_a_tov = cpu_to_le32(sli4->r_a_tov);
+
+ reg_vfi->dw0w1_flags |= cpu_to_le16(SLI4_REGVFI_VP);
+ reg_vfi->vpi = cpu_to_le16(vpi);
+ memcpy(reg_vfi->wwpn, &sli_wwpn, sizeof(reg_vfi->wwpn));
+ reg_vfi->dw10_lportid_flags = cpu_to_le32(fc_id);
+
+ return 0;
+}
+
+int
+sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id, __be64 sli_wwpn,
+ u16 vpi, u16 vfi, bool update)
+{
+ struct sli4_cmd_reg_vpi *reg_vpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ reg_vpi->hdr.command = SLI4_MBX_CMD_REG_VPI;
+
+ flags = (fc_id & SLI4_REGVPI_LOCAL_N_PORTID);
+ if (update)
+ flags |= SLI4_REGVPI_UPD;
+ else
+ flags &= ~SLI4_REGVPI_UPD;
+
+ reg_vpi->dw2_lportid_flags = cpu_to_le32(flags);
+ memcpy(reg_vpi->wwpn, &sli_wwpn, sizeof(reg_vpi->wwpn));
+ reg_vpi->vpi = cpu_to_le16(vpi);
+ reg_vpi->vfi = cpu_to_le16(vfi);
+
+ return 0;
+}
+
+static int
+sli_cmd_request_features(struct sli4 *sli4, void *buf, u32 features_mask,
+ bool query)
+{
+ struct sli4_cmd_request_features *req_features = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ req_features->hdr.command = SLI4_MBX_CMD_RQST_FEATURES;
+
+ if (query)
+ req_features->dw1_qry = cpu_to_le32(SLI4_REQFEAT_QRY);
+
+ req_features->cmd = cpu_to_le32(features_mask);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator)
+{
+ struct sli4_cmd_unreg_fcfi *unreg_fcfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_fcfi->hdr.command = SLI4_MBX_CMD_UNREG_FCFI;
+ unreg_fcfi->fcfi = cpu_to_le16(indicator);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
+ enum sli4_resource which, u32 fc_id)
+{
+ struct sli4_cmd_unreg_rpi *unreg_rpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_rpi->hdr.command = SLI4_MBX_CMD_UNREG_RPI;
+ switch (which) {
+ case SLI4_RSRC_RPI:
+ flags |= SLI4_UNREG_RPI_II_RPI;
+ if (fc_id == U32_MAX)
+ break;
+
+ flags |= SLI4_UNREG_RPI_DP;
+ unreg_rpi->dw2_dest_n_portid =
+ cpu_to_le32(fc_id & SLI4_UNREG_RPI_DEST_N_PORTID_MASK);
+ break;
+ case SLI4_RSRC_VPI:
+ flags |= SLI4_UNREG_RPI_II_VPI;
+ break;
+ case SLI4_RSRC_VFI:
+ flags |= SLI4_UNREG_RPI_II_VFI;
+ break;
+ case SLI4_RSRC_FCFI:
+ flags |= SLI4_UNREG_RPI_II_FCFI;
+ break;
+ default:
+ efc_log_info(sli4, "unknown type %#x\n", which);
+ return -EIO;
+ }
+
+ unreg_rpi->dw1w1_flags = cpu_to_le16(flags);
+ unreg_rpi->index = cpu_to_le16(indicator);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 index, u32 which)
+{
+ struct sli4_cmd_unreg_vfi *unreg_vfi = buf;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_vfi->hdr.command = SLI4_MBX_CMD_UNREG_VFI;
+ switch (which) {
+ case SLI4_UNREG_TYPE_DOMAIN:
+ unreg_vfi->index = cpu_to_le16(index);
+ break;
+ case SLI4_UNREG_TYPE_FCF:
+ unreg_vfi->index = cpu_to_le16(index);
+ break;
+ case SLI4_UNREG_TYPE_ALL:
+ unreg_vfi->index = cpu_to_le16(U32_MAX);
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (which != SLI4_UNREG_TYPE_DOMAIN)
+ unreg_vfi->dw2_flags = cpu_to_le16(SLI4_UNREG_VFI_II_FCFI);
+
+ return 0;
+}
+
+int
+sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 indicator, u32 which)
+{
+ struct sli4_cmd_unreg_vpi *unreg_vpi = buf;
+ u32 flags = 0;
+
+ memset(buf, 0, SLI4_BMBX_SIZE);
+
+ unreg_vpi->hdr.command = SLI4_MBX_CMD_UNREG_VPI;
+ unreg_vpi->index = cpu_to_le16(indicator);
+ switch (which) {
+ case SLI4_UNREG_TYPE_PORT:
+ flags |= SLI4_UNREG_VPI_II_VPI;
+ break;
+ case SLI4_UNREG_TYPE_DOMAIN:
+ flags |= SLI4_UNREG_VPI_II_VFI;
+ break;
+ case SLI4_UNREG_TYPE_FCF:
+ flags |= SLI4_UNREG_VPI_II_FCFI;
+ break;
+ case SLI4_UNREG_TYPE_ALL:
+ /* override indicator */
+ unreg_vpi->index = cpu_to_le16(U32_MAX);
+ flags |= SLI4_UNREG_VPI_II_FCFI;
+ break;
+ default:
+ return -EIO;
+ }
+
+ unreg_vpi->dw2w0_flags = cpu_to_le16(flags);
+ return 0;
+}
+
+static int
+sli_cmd_common_modify_eq_delay(struct sli4 *sli4, void *buf,
+ struct sli4_queue *q, int num_q, u32 shift,
+ u32 delay_mult)
+{
+ struct sli4_rqst_cmn_modify_eq_delay *req = NULL;
+ int i;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_modify_eq_delay), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_MODIFY_EQ_DELAY,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_modify_eq_delay));
+ req->num_eq = cpu_to_le32(num_q);
+
+ for (i = 0; i < num_q; i++) {
+ req->eq_delay_record[i].eq_id = cpu_to_le32(q[i].id);
+ req->eq_delay_record[i].phase = cpu_to_le32(shift);
+ req->eq_delay_record[i].delay_multiplier =
+ cpu_to_le32(delay_mult);
+ }
+
+ return 0;
+}
+
+void
+sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf,
+ size_t size, u16 timeout)
+{
+ struct sli4_rqst_lowlevel_set_watchdog *req = NULL;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(lowlevel_set_watchdog), NULL);
+ if (!req)
+ return;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_LOWLEVEL_SET_WATCHDOG,
+ SLI4_SUBSYSTEM_LOWLEVEL, CMD_V0,
+ SLI4_RQST_PYLD_LEN(lowlevel_set_watchdog));
+ req->watchdog_timeout = cpu_to_le16(timeout);
+}
+
+static int
+sli_cmd_common_get_cntl_attributes(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_CNTL_ATTRIBUTES;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = cpu_to_le32(dma->size);
+
+ return 0;
+}
+
+static int
+sli_cmd_common_get_cntl_addl_attributes(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_CNTL_ADDL_ATTRS;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = cpu_to_le32(dma->size);
+
+ return 0;
+}
+
+int
+sli_cmd_common_nop(struct sli4 *sli4, void *buf, uint64_t context)
+{
+ struct sli4_rqst_cmn_nop *nop = NULL;
+
+ nop = sli_config_cmd_init(sli4, buf, SLI4_CFG_PYLD_LENGTH(cmn_nop),
+ NULL);
+ if (!nop)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&nop->hdr, SLI4_CMN_NOP, SLI4_SUBSYSTEM_COMMON,
+ CMD_V0, SLI4_RQST_PYLD_LEN(cmn_nop));
+
+ memcpy(&nop->context, &context, sizeof(context));
+
+ return 0;
+}
+
+int
+sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf, u16 rtype)
+{
+ struct sli4_rqst_cmn_get_resource_extent_info *ext = NULL;
+
+ ext = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_get_resource_extent_info), NULL);
+ if (!ext)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&ext->hdr, SLI4_CMN_GET_RSC_EXTENT_INFO,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_get_resource_extent_info));
+
+ ext->resource_type = cpu_to_le16(rtype);
+
+ return 0;
+}
+
+int
+sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf)
+{
+ struct sli4_rqst_hdr *hdr = NULL;
+
+ hdr = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_get_sli4_params), NULL);
+ if (!hdr)
+ return -EIO;
+
+ hdr->opcode = SLI4_CMN_GET_SLI4_PARAMS;
+ hdr->subsystem = SLI4_SUBSYSTEM_COMMON;
+ hdr->request_length = SLI4_RQST_PYLD_LEN(cmn_get_sli4_params);
+
+ return 0;
+}
+
+static int
+sli_cmd_common_get_port_name(struct sli4 *sli4, void *buf)
+{
+ struct sli4_rqst_cmn_get_port_name *pname;
+
+ pname = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(cmn_get_port_name), NULL);
+ if (!pname)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&pname->hdr, SLI4_CMN_GET_PORT_NAME,
+ SLI4_SUBSYSTEM_COMMON, CMD_V1,
+ SLI4_RQST_PYLD_LEN(cmn_get_port_name));
+
+ /* Set the port type value (ethernet=0, FC=1) for V1 commands */
+ pname->port_type = SLI4_PORT_TYPE_FC;
+
+ return 0;
+}
+
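+/*
+ * Build a COMMON_WRITE_OBJECT request: write @desired_write_length bytes
+ * from @dma to object @obj_name at @offset.  @noc and @eof set the NOC and
+ * EOF bits in the desired-write-length word.
+ */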
+int
+sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
+ u16 eof, u32 desired_write_length,
+ u32 offset, char *obj_name,
+ struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_write_object *wr_obj = NULL;
+ struct sli4_bde *bde;
+ u32 dwflags = 0;
+
+ wr_obj = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_write_object) + sizeof(*bde), NULL);
+ if (!wr_obj)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&wr_obj->hdr, SLI4_CMN_WRITE_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN_VAR(cmn_write_object, sizeof(*bde)));
+
+ if (noc)
+ dwflags |= SLI4_RQ_DES_WRITE_LEN_NOC;
+ if (eof)
+ dwflags |= SLI4_RQ_DES_WRITE_LEN_EOF;
+ dwflags |= (desired_write_length & SLI4_RQ_DES_WRITE_LEN);
+
+ wr_obj->desired_write_len_dword = cpu_to_le32(dwflags);
+
+ wr_obj->write_offset = cpu_to_le32(offset);
+ strncpy(wr_obj->object_name, obj_name, sizeof(wr_obj->object_name) - 1);
+ wr_obj->host_buffer_descriptor_count = cpu_to_le32(1);
+
+ bde = (struct sli4_bde *)wr_obj->host_buffer_descriptor;
+
+ /* Set up the BDE to transfer desired_write_length bytes to the device */
+ bde->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (desired_write_length & SLI4_BDE_LEN_MASK));
+ bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
+ bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
+
+ return 0;
+}
+
+int
+sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *obj_name)
+{
+ struct sli4_rqst_cmn_delete_object *req = NULL;
+
+ req = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_delete_object), NULL);
+ if (!req)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_CMN_DELETE_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_delete_object));
+
+ strncpy(req->object_name, obj_name, sizeof(req->object_name) - 1);
+ return 0;
+}
+
+int
+sli_cmd_common_read_object(struct sli4 *sli4, void *buf, u32 desired_read_len,
+ u32 offset, char *obj_name, struct efc_dma *dma)
+{
+ struct sli4_rqst_cmn_read_object *rd_obj = NULL;
+ struct sli4_bde *bde;
+
+ rd_obj = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_read_object) + sizeof(*bde), NULL);
+ if (!rd_obj)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&rd_obj->hdr, SLI4_CMN_READ_OBJECT,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN_VAR(cmn_read_object, sizeof(*bde)));
+ rd_obj->desired_read_length_dword =
+ cpu_to_le32(desired_read_len & SLI4_REQ_DESIRE_READLEN);
+
+ rd_obj->read_offset = cpu_to_le32(offset);
+ strncpy(rd_obj->object_name, obj_name, sizeof(rd_obj->object_name) - 1);
+ rd_obj->host_buffer_descriptor_count = cpu_to_le32(1);
+
+ bde = (struct sli4_bde *)rd_obj->host_buffer_descriptor;
+
+ /* Set up the BDE to transfer desired_read_len bytes from the device */
+ bde->bde_type_buflen =
+ cpu_to_le32((SLI4_BDE_TYPE_VAL(64)) |
+ (desired_read_len & SLI4_BDE_LEN_MASK));
+ if (dma) {
+ bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
+ bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
+ } else {
+ bde->u.data.low = 0;
+ bde->u.data.high = 0;
+ }
+
+ return 0;
+}
+
+int
+sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf, struct efc_dma *cmd,
+ struct efc_dma *resp)
+{
+ struct sli4_rqst_dmtf_exec_clp_cmd *clp_cmd = NULL;
+
+ clp_cmd = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(dmtf_exec_clp_cmd), NULL);
+ if (!clp_cmd)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&clp_cmd->hdr, DMTF_EXEC_CLP_CMD, SLI4_SUBSYSTEM_DMTF,
+ CMD_V0, SLI4_RQST_PYLD_LEN(dmtf_exec_clp_cmd));
+
+ clp_cmd->cmd_buf_length = cpu_to_le32(cmd->size);
+ clp_cmd->cmd_buf_addr_low = cpu_to_le32(lower_32_bits(cmd->phys));
+ clp_cmd->cmd_buf_addr_high = cpu_to_le32(upper_32_bits(cmd->phys));
+ clp_cmd->resp_buf_length = cpu_to_le32(resp->size);
+ clp_cmd->resp_buf_addr_low = cpu_to_le32(lower_32_bits(resp->phys));
+ clp_cmd->resp_buf_addr_high = cpu_to_le32(upper_32_bits(resp->phys));
+ return 0;
+}
+
+int
+sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf, bool query,
+ bool is_buffer_list,
+ struct efc_dma *buffer, u8 fdb)
+{
+ struct sli4_rqst_cmn_set_dump_location *set_dump_loc = NULL;
+ u32 buffer_length_flag = 0;
+
+ set_dump_loc = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_set_dump_location), NULL);
+ if (!set_dump_loc)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&set_dump_loc->hdr, SLI4_CMN_SET_DUMP_LOCATION,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_set_dump_location));
+
+ if (is_buffer_list)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_BLP;
+
+ if (query)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_QRY;
+
+ if (fdb)
+ buffer_length_flag |= SLI4_CMN_SET_DUMP_FDB;
+
+ if (buffer) {
+ set_dump_loc->buf_addr_low =
+ cpu_to_le32(lower_32_bits(buffer->phys));
+ set_dump_loc->buf_addr_high =
+ cpu_to_le32(upper_32_bits(buffer->phys));
+
+ buffer_length_flag |=
+ buffer->len & SLI4_CMN_SET_DUMP_BUFFER_LEN;
+ } else {
+ set_dump_loc->buf_addr_low = 0;
+ set_dump_loc->buf_addr_high = 0;
+ set_dump_loc->buffer_length_dword = 0;
+ }
+ set_dump_loc->buffer_length_dword = cpu_to_le32(buffer_length_flag);
+ return 0;
+}
+
+int
+sli_cmd_common_set_features(struct sli4 *sli4, void *buf, u32 feature,
+ u32 param_len, void *parameter)
+{
+ struct sli4_rqst_cmn_set_features *cmd = NULL;
+
+ cmd = sli_config_cmd_init(sli4, buf,
+ SLI4_RQST_CMDSZ(cmn_set_features), NULL);
+ if (!cmd)
+ return -EIO;
+
+ sli_cmd_fill_hdr(&cmd->hdr, SLI4_CMN_SET_FEATURES,
+ SLI4_SUBSYSTEM_COMMON, CMD_V0,
+ SLI4_RQST_PYLD_LEN(cmn_set_features));
+
+ cmd->feature = cpu_to_le32(feature);
+ cmd->param_len = cpu_to_le32(param_len);
+ memcpy(cmd->params, parameter, param_len);
+
+ return 0;
+}
+
+int
+sli_cqe_mq(struct sli4 *sli4, void *buf)
+{
+ struct sli4_mcqe *mcqe = buf;
+ u32 dwflags = le32_to_cpu(mcqe->dw3_flags);
+ /*
+ * Firmware can split mbx completions into two MCQEs: first with only
+ * the "consumed" bit set and a second with the "complete" bit set.
+ * Thus, ignore MCQE unless "complete" is set.
+ */
+ if (!(dwflags & SLI4_MCQE_COMPLETED))
+ return SLI4_MCQE_STATUS_NOT_COMPLETED;
+
+ if (le16_to_cpu(mcqe->completion_status)) {
+ efc_log_info(sli4, "status(st=%#x ext=%#x con=%d cmp=%d ae=%d val=%d)\n",
+ le16_to_cpu(mcqe->completion_status),
+ le16_to_cpu(mcqe->extended_status),
+ (dwflags & SLI4_MCQE_CONSUMED),
+ (dwflags & SLI4_MCQE_COMPLETED),
+ (dwflags & SLI4_MCQE_AE),
+ (dwflags & SLI4_MCQE_VALID));
+ }
+
+ return le16_to_cpu(mcqe->completion_status);
+}
+
+int
+sli_cqe_async(struct sli4 *sli4, void *buf)
+{
+ struct sli4_acqe *acqe = buf;
+ int rc = -EIO;
+
+ if (!buf) {
+ efc_log_err(sli4, "bad parameter sli4=%p buf=%p\n", sli4, buf);
+ return -EIO;
+ }
+
+ switch (acqe->event_code) {
+ case SLI4_ACQE_EVENT_CODE_LINK_STATE:
+ efc_log_info(sli4, "Unsupported by FC link, evt code:%#x\n",
+ acqe->event_code);
+ break;
+ case SLI4_ACQE_EVENT_CODE_GRP_5:
+ efc_log_info(sli4, "ACQE GRP5\n");
+ break;
+ case SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT:
+ efc_log_info(sli4, "ACQE SLI Port, type=0x%x, data1,2=0x%08x,0x%08x\n",
+ acqe->event_type,
+ le32_to_cpu(acqe->event_data[0]),
+ le32_to_cpu(acqe->event_data[1]));
+ break;
+ case SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT:
+ rc = sli_fc_process_link_attention(sli4, buf);
+ break;
+ default:
+ efc_log_info(sli4, "ACQE unknown=%#x\n", acqe->event_code);
+ }
+
+ return rc;
+}
+
+bool
+sli_fw_ready(struct sli4 *sli4)
+{
+ u32 val;
+
+ /* Determine if the chip FW is in a ready state */
+ val = sli_reg_read_status(sli4);
+ return (val & SLI4_PORT_STATUS_RDY) ? 1 : 0;
+}
+
+static bool
+sli_wait_for_fw_ready(struct sli4 *sli4, u32 timeout_ms)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(timeout_ms);
+
+ do {
+ if (sli_fw_ready(sli4))
+ return true;
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ return false;
+}
+
+static bool
+sli_sliport_reset(struct sli4 *sli4)
+{
+ bool rc;
+ u32 val;
+
+ val = SLI4_PORT_CTRL_IP;
+ /* Initialize port, endian */
+ writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
+
+ rc = sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC);
+ if (!rc)
+ efc_log_crit(sli4, "port failed to become ready after initialization\n");
+
+ return rc;
+}
+
+static bool
+sli_fw_init(struct sli4 *sli4)
+{
+ /*
+ * Is firmware ready for operation?
+ */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "FW status is NOT ready\n");
+ return false;
+ }
+
+ /*
+ * Reset port to a known state
+ */
+ return sli_sliport_reset(sli4);
+}
+
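+/*
+ * Issue a REQUEST_FEATURES command through the bootstrap mailbox.  On
+ * success, *features is overwritten with the feature bits the port
+ * reports back in the response.
+ */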
+static int
+sli_request_features(struct sli4 *sli4, u32 *features, bool query)
+{
+ struct sli4_cmd_request_features *req_features = sli4->bmbx.virt;
+
+ if (sli_cmd_request_features(sli4, sli4->bmbx.virt, *features, query)) {
+ efc_log_err(sli4, "bad REQUEST_FEATURES write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(req_features->hdr.status)) {
+ efc_log_err(sli4, "REQUEST_FEATURES bad status %#x\n",
+ le16_to_cpu(req_features->hdr.status));
+ return -EIO;
+ }
+
+ *features = le32_to_cpu(req_features->resp);
+ return 0;
+}
+
+void
+sli_calc_max_qentries(struct sli4 *sli4)
+{
+ enum sli4_qtype q;
+ u32 qentries;
+
+ for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
+ sli4->qinfo.max_qentries[q] =
+ sli_convert_mask_to_count(sli4->qinfo.count_method[q],
+ sli4->qinfo.count_mask[q]);
+ }
+
+ /* A single, contiguous DMA allocation will be made for each queue,
+ * of size (max_qentries * queue entry size); since these can be large,
+ * check against the OS max DMA allocation size.
+ */
+ for (q = SLI4_QTYPE_EQ; q < SLI4_QTYPE_MAX; q++) {
+ qentries = sli4->qinfo.max_qentries[q];
+
+ efc_log_info(sli4, "[%s]: max_qentries from %d to %d\n",
+ SLI4_QNAME[q],
+ sli4->qinfo.max_qentries[q], qentries);
+ sli4->qinfo.max_qentries[q] = qentries;
+ }
+}
+
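+/*
+ * Issue READ_CONFIG through the bootstrap mailbox and cache the results:
+ * the VFI/VPI/RPI/XRI/FCFI bases and counts, topology, E_D_TOV/R_A_TOV,
+ * link module type and the per-type maximum queue counts.
+ */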
+static int
+sli_get_read_config(struct sli4 *sli4)
+{
+ struct sli4_rsp_read_config *conf = sli4->bmbx.virt;
+ u32 i, total, total_size;
+ u32 *base;
+
+ if (sli_cmd_read_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad READ_CONFIG write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (READ_CONFIG)\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(conf->hdr.status)) {
+ efc_log_err(sli4, "READ_CONFIG bad status %#x\n",
+ le16_to_cpu(conf->hdr.status));
+ return -EIO;
+ }
+
+ sli4->params.has_extents =
+ le32_to_cpu(conf->ext_dword) & SLI4_READ_CFG_RESP_RESOURCE_EXT;
+ if (sli4->params.has_extents) {
+ efc_log_err(sli4, "extents not supported\n");
+ return -EIO;
+ }
+
+ base = sli4->ext[0].base;
+ if (!base) {
+ int size = SLI4_RSRC_MAX * sizeof(u32);
+
+ base = kzalloc(size, GFP_KERNEL);
+ if (!base)
+ return -EIO;
+ }
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ sli4->ext[i].number = 1;
+ sli4->ext[i].n_alloc = 0;
+ sli4->ext[i].base = &base[i];
+ }
+
+ sli4->ext[SLI4_RSRC_VFI].base[0] = le16_to_cpu(conf->vfi_base);
+ sli4->ext[SLI4_RSRC_VFI].size = le16_to_cpu(conf->vfi_count);
+
+ sli4->ext[SLI4_RSRC_VPI].base[0] = le16_to_cpu(conf->vpi_base);
+ sli4->ext[SLI4_RSRC_VPI].size = le16_to_cpu(conf->vpi_count);
+
+ sli4->ext[SLI4_RSRC_RPI].base[0] = le16_to_cpu(conf->rpi_base);
+ sli4->ext[SLI4_RSRC_RPI].size = le16_to_cpu(conf->rpi_count);
+
+ sli4->ext[SLI4_RSRC_XRI].base[0] = le16_to_cpu(conf->xri_base);
+ sli4->ext[SLI4_RSRC_XRI].size = le16_to_cpu(conf->xri_count);
+
+ sli4->ext[SLI4_RSRC_FCFI].base[0] = 0;
+ sli4->ext[SLI4_RSRC_FCFI].size = le16_to_cpu(conf->fcfi_count);
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ total = sli4->ext[i].number * sli4->ext[i].size;
+ total_size = BITS_TO_LONGS(total) * sizeof(long);
+ sli4->ext[i].use_map = kzalloc(total_size, GFP_KERNEL);
+ if (!sli4->ext[i].use_map) {
+ efc_log_err(sli4, "bitmap memory allocation failed %d\n",
+ i);
+ return -EIO;
+ }
+ sli4->ext[i].map_size = total;
+ }
+
+ sli4->topology = (le32_to_cpu(conf->topology_dword) &
+ SLI4_READ_CFG_RESP_TOPOLOGY) >> 24;
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ efc_log_info(sli4, "FC (unknown)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ efc_log_info(sli4, "FC (direct attach)\n");
+ break;
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ efc_log_info(sli4, "FC (arbitrated loop)\n");
+ break;
+ default:
+ efc_log_info(sli4, "bad topology %#x\n", sli4->topology);
+ }
+
+ sli4->e_d_tov = le16_to_cpu(conf->e_d_tov);
+ sli4->r_a_tov = le16_to_cpu(conf->r_a_tov);
+
+ sli4->link_module_type = le16_to_cpu(conf->lmt);
+
+ sli4->qinfo.max_qcount[SLI4_QTYPE_EQ] = le16_to_cpu(conf->eq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_CQ] = le16_to_cpu(conf->cq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_WQ] = le16_to_cpu(conf->wq_count);
+ sli4->qinfo.max_qcount[SLI4_QTYPE_RQ] = le16_to_cpu(conf->rq_count);
+
+ /*
+ * READ_CONFIG doesn't give the max number of MQ. Applications
+ * will typically want 1, but we may need another at some future
+ * date. Dummy up a "max" MQ count here.
+ */
+ sli4->qinfo.max_qcount[SLI4_QTYPE_MQ] = SLI4_USER_MQ_COUNT;
+ return 0;
+}
+
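+/*
+ * Issue COMMON_GET_SLI4_PARAMETERS and cache the reported capabilities:
+ * auto-registration, auto xfer-ready and DIF support, queue page counts
+ * and entry-count encodings, SGL limits, RQ buffer sizes and the
+ * supported WQE size.
+ */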
+static int
+sli_get_sli4_parameters(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_sli4_params *parms;
+ u32 dw_loopback;
+ u32 dw_eq_pg_cnt;
+ u32 dw_cq_pg_cnt;
+ u32 dw_mq_pg_cnt;
+ u32 dw_wq_pg_cnt;
+ u32 dw_rq_pg_cnt;
+ u32 dw_sgl_pg_cnt;
+
+ if (sli_cmd_common_get_sli4_parameters(sli4, sli4->bmbx.virt))
+ return -EIO;
+
+ parms = (struct sli4_rsp_cmn_get_sli4_params *)
+ (((u8 *)sli4->bmbx.virt) +
+ offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (parms->hdr.status) {
+ efc_log_err(sli4, "COMMON_GET_SLI4_PARAMETERS bad status %#x",
+ parms->hdr.status);
+ efc_log_err(sli4, "additional status %#x\n",
+ parms->hdr.additional_status);
+ return -EIO;
+ }
+
+ dw_loopback = le32_to_cpu(parms->dw16_loopback_scope);
+ dw_eq_pg_cnt = le32_to_cpu(parms->dw6_eq_page_cnt);
+ dw_cq_pg_cnt = le32_to_cpu(parms->dw8_cq_page_cnt);
+ dw_mq_pg_cnt = le32_to_cpu(parms->dw10_mq_page_cnt);
+ dw_wq_pg_cnt = le32_to_cpu(parms->dw12_wq_page_cnt);
+ dw_rq_pg_cnt = le32_to_cpu(parms->dw14_rq_page_cnt);
+
+ sli4->params.auto_reg = (dw_loopback & SLI4_PARAM_AREG);
+ sli4->params.auto_xfer_rdy = (dw_loopback & SLI4_PARAM_AGXF);
+ sli4->params.hdr_template_req = (dw_loopback & SLI4_PARAM_HDRR);
+ sli4->params.t10_dif_inline_capable = (dw_loopback & SLI4_PARAM_TIMM);
+ sli4->params.t10_dif_separate_capable = (dw_loopback & SLI4_PARAM_TSMM);
+
+ sli4->params.mq_create_version = GET_Q_CREATE_VERSION(dw_mq_pg_cnt);
+ sli4->params.cq_create_version = GET_Q_CREATE_VERSION(dw_cq_pg_cnt);
+
+ sli4->rq_min_buf_size = le16_to_cpu(parms->min_rq_buffer_size);
+ sli4->rq_max_buf_size = le32_to_cpu(parms->max_rq_buffer_size);
+
+ sli4->qinfo.qpage_count[SLI4_QTYPE_EQ] =
+ (dw_eq_pg_cnt & SLI4_PARAM_EQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_CQ] =
+ (dw_cq_pg_cnt & SLI4_PARAM_CQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_MQ] =
+ (dw_mq_pg_cnt & SLI4_PARAM_MQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_WQ] =
+ (dw_wq_pg_cnt & SLI4_PARAM_WQ_PAGE_CNT_MASK);
+ sli4->qinfo.qpage_count[SLI4_QTYPE_RQ] =
+ (dw_rq_pg_cnt & SLI4_PARAM_RQ_PAGE_CNT_MASK);
+
+ /* save count methods and masks for each queue type */
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_EQ] =
+ le16_to_cpu(parms->eqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_EQ] =
+ GET_Q_CNT_METHOD(dw_eq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_CQ] =
+ le16_to_cpu(parms->cqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_CQ] =
+ GET_Q_CNT_METHOD(dw_cq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_MQ] =
+ le16_to_cpu(parms->mqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_MQ] =
+ GET_Q_CNT_METHOD(dw_mq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_WQ] =
+ le16_to_cpu(parms->wqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_WQ] =
+ GET_Q_CNT_METHOD(dw_wq_pg_cnt);
+
+ sli4->qinfo.count_mask[SLI4_QTYPE_RQ] =
+ le16_to_cpu(parms->rqe_count_mask);
+ sli4->qinfo.count_method[SLI4_QTYPE_RQ] =
+ GET_Q_CNT_METHOD(dw_rq_pg_cnt);
+
+ /* now calculate max queue entries */
+ sli_calc_max_qentries(sli4);
+
+ dw_sgl_pg_cnt = le32_to_cpu(parms->dw18_sgl_page_cnt);
+
+ /* max # of pages */
+ sli4->max_sgl_pages = (dw_sgl_pg_cnt & SLI4_PARAM_SGL_PAGE_CNT_MASK);
+
+ /* bit map of available sizes */
+ sli4->sgl_page_sizes = (dw_sgl_pg_cnt &
+ SLI4_PARAM_SGL_PAGE_SZS_MASK) >> 8;
+ /* ignore HLM here. Use value from REQUEST_FEATURES */
+ sli4->sge_supported_length = le32_to_cpu(parms->sge_supported_length);
+ sli4->params.sgl_pre_reg_required = (dw_loopback & SLI4_PARAM_SGLR);
+ /* default to using pre-registered SGLs */
+ sli4->params.sgl_pre_registered = true;
+
+ sli4->params.perf_hint = dw_loopback & SLI4_PARAM_PHON;
+ sli4->params.perf_wq_id_association = (dw_loopback & SLI4_PARAM_PHWQ);
+
+ sli4->rq_batch = (le16_to_cpu(parms->dw15w1_rq_db_window) &
+ SLI4_PARAM_RQ_DB_WINDOW_MASK) >> 12;
+
+ /* Use the highest available WQE size. */
+ if (((dw_wq_pg_cnt & SLI4_PARAM_WQE_SZS_MASK) >> 8) &
+ SLI4_128BYTE_WQE_SUPPORT)
+ sli4->wqe_size = SLI4_WQE_EXT_BYTES;
+ else
+ sli4->wqe_size = SLI4_WQE_BYTES;
+
+ return 0;
+}
+
+static int
+sli_get_ctrl_attributes(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_cntl_attributes *attr;
+ struct sli4_rsp_cmn_get_cntl_addl_attributes *add_attr;
+ struct efc_dma data;
+ u32 psize;
+
+ /*
+ * Issue COMMON_GET_CNTL_ATTRIBUTES to get port_number. Temporarily
+ * uses VPD DMA buffer as the response won't fit in the embedded
+ * buffer.
+ */
+ memset(sli4->vpd_data.virt, 0, sli4->vpd_data.size);
+ if (sli_cmd_common_get_cntl_attributes(sli4, sli4->bmbx.virt,
+ &sli4->vpd_data)) {
+ efc_log_err(sli4, "bad COMMON_GET_CNTL_ATTRIBUTES write\n");
+ return -EIO;
+ }
+
+ attr = sli4->vpd_data.virt;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail\n");
+ return -EIO;
+ }
+
+ if (attr->hdr.status) {
+ efc_log_err(sli4, "COMMON_GET_CNTL_ATTRIBUTES bad status %#x",
+ attr->hdr.status);
+ efc_log_err(sli4, "additional status %#x\n",
+ attr->hdr.additional_status);
+ return -EIO;
+ }
+
+ sli4->port_number = attr->port_num_type_flags & SLI4_CNTL_ATTR_PORTNUM;
+
+ memcpy(sli4->bios_version_string, attr->bios_version_str,
+ sizeof(sli4->bios_version_string));
+
+ /* get additional attributes */
+ psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
+ data.size = psize;
+ data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
+ &data.phys, GFP_DMA);
+ if (!data.virt) {
+ memset(&data, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
+ return -EIO;
+ }
+
+ if (sli_cmd_common_get_cntl_addl_attributes(sli4, sli4->bmbx.virt,
+ &data)) {
+ efc_log_err(sli4, "bad GET_CNTL_ADDL_ATTR write\n");
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "mailbox fail (GET_CNTL_ADDL_ATTR)\n");
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ add_attr = data.virt;
+ if (add_attr->hdr.status) {
+ efc_log_err(sli4, "GET_CNTL_ADDL_ATTR bad status %#x\n",
+ add_attr->hdr.status);
+ dma_free_coherent(&sli4->pci->dev, data.size,
+ data.virt, data.phys);
+ return -EIO;
+ }
+
+ memcpy(sli4->ipl_name, add_attr->ipl_file_name, sizeof(sli4->ipl_name));
+
+ efc_log_info(sli4, "IPL:%s\n", (char *)sli4->ipl_name);
+
+ dma_free_coherent(&sli4->pci->dev, data.size, data.virt,
+ data.phys);
+ memset(&data, 0, sizeof(struct efc_dma));
+ return 0;
+}
+
+static int
+sli_get_fw_rev(struct sli4 *sli4)
+{
+ struct sli4_cmd_read_rev *read_rev = sli4->bmbx.virt;
+
+ if (sli_cmd_read_rev(sli4, sli4->bmbx.virt, &sli4->vpd_data))
+ return -EIO;
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail (READ_REV)\n");
+ return -EIO;
+ }
+
+ if (le16_to_cpu(read_rev->hdr.status)) {
+ efc_log_err(sli4, "READ_REV bad status %#x\n",
+ le16_to_cpu(read_rev->hdr.status));
+ return -EIO;
+ }
+
+ sli4->fw_rev[0] = le32_to_cpu(read_rev->first_fw_id);
+ memcpy(sli4->fw_name[0], read_rev->first_fw_name,
+ sizeof(sli4->fw_name[0]));
+
+ sli4->fw_rev[1] = le32_to_cpu(read_rev->second_fw_id);
+ memcpy(sli4->fw_name[1], read_rev->second_fw_name,
+ sizeof(sli4->fw_name[1]));
+
+ sli4->hw_rev[0] = le32_to_cpu(read_rev->first_hw_rev);
+ sli4->hw_rev[1] = le32_to_cpu(read_rev->second_hw_rev);
+ sli4->hw_rev[2] = le32_to_cpu(read_rev->third_hw_rev);
+
+ efc_log_info(sli4, "FW1:%s (%08x) / FW2:%s (%08x)\n",
+ read_rev->first_fw_name, le32_to_cpu(read_rev->first_fw_id),
+ read_rev->second_fw_name, le32_to_cpu(read_rev->second_fw_id));
+
+ efc_log_info(sli4, "HW1: %08x / HW2: %08x\n",
+ le32_to_cpu(read_rev->first_hw_rev),
+ le32_to_cpu(read_rev->second_hw_rev));
+
+ /* Check that all VPD data was returned */
+ if (le32_to_cpu(read_rev->returned_vpd_length) !=
+ le32_to_cpu(read_rev->actual_vpd_length)) {
+ efc_log_info(sli4, "VPD length: avail=%d return=%d actual=%d\n",
+ le32_to_cpu(read_rev->available_length_dword) &
+ SLI4_READ_REV_AVAILABLE_LENGTH,
+ le32_to_cpu(read_rev->returned_vpd_length),
+ le32_to_cpu(read_rev->actual_vpd_length));
+ }
+ sli4->vpd_length = le32_to_cpu(read_rev->returned_vpd_length);
+ return 0;
+}
+
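+/*
+ * Collect the port configuration needed before queues can be created:
+ * READ_CONFIG, SLI-4 parameters, controller attributes, port name,
+ * firmware revision and the WWPN/WWNN from READ_NVPARMS.
+ */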
+static int
+sli_get_config(struct sli4 *sli4)
+{
+ struct sli4_rsp_cmn_get_port_name *port_name;
+ struct sli4_cmd_read_nvparms *read_nvparms;
+
+ /*
+ * Read the device configuration
+ */
+ if (sli_get_read_config(sli4))
+ return -EIO;
+
+ if (sli_get_sli4_parameters(sli4))
+ return -EIO;
+
+ if (sli_get_ctrl_attributes(sli4))
+ return -EIO;
+
+ if (sli_cmd_common_get_port_name(sli4, sli4->bmbx.virt))
+ return -EIO;
+
+ port_name = (struct sli4_rsp_cmn_get_port_name *)
+ (((u8 *)sli4->bmbx.virt) +
+ offsetof(struct sli4_cmd_sli_config, payload.embed));
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (GET_PORT_NAME)\n");
+ return -EIO;
+ }
+
+ sli4->port_name[0] = port_name->port_name[sli4->port_number];
+ sli4->port_name[1] = '\0';
+
+ if (sli_get_fw_rev(sli4))
+ return -EIO;
+
+ if (sli_cmd_read_nvparms(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad READ_NVPARMS write\n");
+ return -EIO;
+ }
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox fail (READ_NVPARMS)\n");
+ return -EIO;
+ }
+
+ read_nvparms = sli4->bmbx.virt;
+ if (le16_to_cpu(read_nvparms->hdr.status)) {
+ efc_log_err(sli4, "READ_NVPARMS bad status %#x\n",
+ le16_to_cpu(read_nvparms->hdr.status));
+ return -EIO;
+ }
+
+ memcpy(sli4->wwpn, read_nvparms->wwpn, sizeof(sli4->wwpn));
+ memcpy(sli4->wwnn, read_nvparms->wwnn, sizeof(sli4->wwnn));
+
+ efc_log_info(sli4, "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->wwpn[0], sli4->wwpn[1], sli4->wwpn[2], sli4->wwpn[3],
+ sli4->wwpn[4], sli4->wwpn[5], sli4->wwpn[6], sli4->wwpn[7]);
+ efc_log_info(sli4, "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ sli4->wwnn[0], sli4->wwnn[1], sli4->wwnn[2], sli4->wwnn[3],
+ sli4->wwnn[4], sli4->wwnn[5], sli4->wwnn[6], sli4->wwnn[7]);
+
+ return 0;
+}
+
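+/*
+ * One-time SLI layer setup: identify the ASIC from SLI_INTF and PCI
+ * config space, allocate the bootstrap mailbox and VPD DMA buffers,
+ * bring the firmware to a ready state, then query the supported
+ * features and the port configuration.
+ */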
+int
+sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
+ void __iomem *reg[])
+{
+ u32 intf = U32_MAX;
+ u32 pci_class_rev = 0;
+ u32 rev_id = 0;
+ u32 family = 0;
+ u32 asic_id = 0;
+ u32 i;
+ struct sli4_asic_entry_t *asic;
+
+ memset(sli4, 0, sizeof(struct sli4));
+
+ sli4->os = os;
+ sli4->pci = pdev;
+
+ for (i = 0; i < 6; i++)
+ sli4->reg[i] = reg[i];
+ /*
+ * Read the SLI_INTF register to discover the register layout
+ * and other capability information
+ */
+ if (pci_read_config_dword(pdev, SLI4_INTF_REG, &intf))
+ return -EIO;
+
+ if ((intf & SLI4_INTF_VALID_MASK) != (u32)SLI4_INTF_VALID_VALUE) {
+ efc_log_err(sli4, "SLI_INTF is not valid\n");
+ return -EIO;
+ }
+
+ /* The driver only supports SLI-4 */
+ if ((intf & SLI4_INTF_REV_MASK) != SLI4_INTF_REV_S4) {
+ efc_log_err(sli4, "Unsupported SLI revision (intf=%#x)\n", intf);
+ return -EIO;
+ }
+
+ sli4->sli_family = intf & SLI4_INTF_FAMILY_MASK;
+
+ sli4->if_type = intf & SLI4_INTF_IF_TYPE_MASK;
+ efc_log_info(sli4, "status=%#x error1=%#x error2=%#x\n",
+ sli_reg_read_status(sli4),
+ sli_reg_read_err1(sli4),
+ sli_reg_read_err2(sli4));
+
+ /*
+ * set the ASIC type and revision
+ */
+ if (pci_read_config_dword(pdev, PCI_CLASS_REVISION, &pci_class_rev))
+ return -EIO;
+
+ rev_id = pci_class_rev & 0xff;
+ family = sli4->sli_family;
+ if (family == SLI4_FAMILY_CHECK_ASIC_TYPE) {
+ if (!pci_read_config_dword(pdev, SLI4_ASIC_ID_REG, &asic_id))
+ family = asic_id & SLI4_ASIC_GEN_MASK;
+ }
+
+ for (i = 0, asic = sli4_asic_table; i < ARRAY_SIZE(sli4_asic_table);
+ i++, asic++) {
+ if (rev_id == asic->rev_id && family == asic->family) {
+ sli4->asic_type = family;
+ sli4->asic_rev = rev_id;
+ break;
+ }
+ }
+ /* Fail if no matching asic type/rev was found */
+ if (!sli4->asic_type) {
+ efc_log_err(sli4, "no matching asic family/rev found: %02x/%02x\n",
+ family, rev_id);
+ return -EIO;
+ }
+
+ /*
+ * The bootstrap mailbox is equivalent to a MQ with a single 256 byte
+ * entry, a CQ with a single 16 byte entry, and no event queue.
+ * Alignment must be 16 bytes as the low order address bits in the
+ * address register are also control / status.
+ */
+ sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
+ sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
+ &sli4->bmbx.phys, GFP_DMA);
+ if (!sli4->bmbx.virt) {
+ memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
+ return -EIO;
+ }
+
+ if (sli4->bmbx.phys & SLI4_BMBX_MASK_LO) {
+ efc_log_err(sli4, "bad alignment for bootstrap mailbox\n");
+ return -EIO;
+ }
+
+ efc_log_info(sli4, "bmbx v=%p p=0x%x %08x s=%zd\n", sli4->bmbx.virt,
+ upper_32_bits(sli4->bmbx.phys),
+ lower_32_bits(sli4->bmbx.phys), sli4->bmbx.size);
+
+ /* 4096 is arbitrary; it just needs to be large enough for the VPD data
+ * and for the GET_CNTL_ATTRIBUTES response that temporarily borrows
+ * this buffer.
+ */
+ sli4->vpd_data.size = 4096;
+ sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
+ sli4->vpd_data.size,
+ &sli4->vpd_data.phys,
+ GFP_DMA);
+ if (!sli4->vpd_data.virt) {
+ memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
+ /* Note that failure isn't fatal in this specific case */
+ efc_log_info(sli4, "VPD buffer allocation failed\n");
+ }
+
+ if (!sli_fw_init(sli4)) {
+ efc_log_err(sli4, "FW initialization failed\n");
+ return -EIO;
+ }
+
+ /*
+ * Set one of fcpi (initiator), fcpt (target) or fcpc (combined),
+ * in addition to any other desired features.
+ */
+ sli4->features = (SLI4_REQFEAT_IAAB | SLI4_REQFEAT_NPIV |
+ SLI4_REQFEAT_DIF | SLI4_REQFEAT_VF |
+ SLI4_REQFEAT_FCPC | SLI4_REQFEAT_IAAR |
+ SLI4_REQFEAT_HLM | SLI4_REQFEAT_PERFH |
+ SLI4_REQFEAT_RXSEQ | SLI4_REQFEAT_RXRI |
+ SLI4_REQFEAT_MRQP);
+
+ /* use performance hints if available */
+ if (sli4->params.perf_hint)
+ sli4->features |= SLI4_REQFEAT_PERFH;
+
+ if (sli_request_features(sli4, &sli4->features, true))
+ return -EIO;
+
+ if (sli_get_config(sli4))
+ return -EIO;
+
+ return 0;
+}
+
+int
+sli_init(struct sli4 *sli4)
+{
+ if (sli4->params.has_extents) {
+ efc_log_info(sli4, "extend allocation not supported\n");
+ return -EIO;
+ }
+
+ sli4->features &= (~SLI4_REQFEAT_HLM);
+ sli4->features &= (~SLI4_REQFEAT_RXSEQ);
+ sli4->features &= (~SLI4_REQFEAT_RXRI);
+
+ if (sli_request_features(sli4, &sli4->features, false))
+ return -EIO;
+
+ return 0;
+}
+
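+/*
+ * Re-initialize the port after a reset: bring the firmware back to a
+ * ready state, drop the cached extent maps and rebuild the configuration
+ * via sli_get_config().
+ */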
+int
+sli_reset(struct sli4 *sli4)
+{
+ u32 i;
+
+ if (!sli_fw_init(sli4)) {
+ efc_log_crit(sli4, "FW initialization failed\n");
+ return -EIO;
+ }
+
+ kfree(sli4->ext[0].base);
+ sli4->ext[0].base = NULL;
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ kfree(sli4->ext[i].use_map);
+ sli4->ext[i].use_map = NULL;
+ sli4->ext[i].base = NULL;
+ }
+
+ return sli_get_config(sli4);
+}
+
+int
+sli_fw_reset(struct sli4 *sli4)
+{
+ /*
+ * Firmware must be ready before issuing the reset.
+ */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "FW status is NOT ready\n");
+ return -EIO;
+ }
+
+ /* Lancer uses PHYDEV_CONTROL */
+ writel(SLI4_PHYDEV_CTRL_FRST, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
+
+ /* wait for the FW to become ready after the reset */
+ if (!sli_wait_for_fw_ready(sli4, SLI4_FW_READY_TIMEOUT_MSEC)) {
+ efc_log_crit(sli4, "Failed to be ready after firmware reset\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+void
+sli_teardown(struct sli4 *sli4)
+{
+ u32 i;
+
+ kfree(sli4->ext[0].base);
+ sli4->ext[0].base = NULL;
+
+ for (i = 0; i < SLI4_RSRC_MAX; i++) {
+ sli4->ext[i].base = NULL;
+
+ kfree(sli4->ext[i].use_map);
+ sli4->ext[i].use_map = NULL;
+ }
+
+ if (!sli_sliport_reset(sli4))
+ efc_log_err(sli4, "FW deinitialization failed\n");
+
+ dma_free_coherent(&sli4->pci->dev, sli4->vpd_data.size,
+ sli4->vpd_data.virt, sli4->vpd_data.phys);
+ memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
+
+ dma_free_coherent(&sli4->pci->dev, sli4->bmbx.size,
+ sli4->bmbx.virt, sli4->bmbx.phys);
+ memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
+}
+
+int
+sli_callback(struct sli4 *sli4, enum sli4_callback which,
+ void *func, void *arg)
+{
+ if (!func) {
+ efc_log_err(sli4, "bad parameter sli4=%p which=%#x func=%p\n",
+ sli4, which, func);
+ return -EIO;
+ }
+
+ switch (which) {
+ case SLI4_CB_LINK:
+ sli4->link = func;
+ sli4->link_arg = arg;
+ break;
+ default:
+ efc_log_info(sli4, "unknown callback %#x\n", which);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq,
+ u32 num_eq, u32 shift, u32 delay_mult)
+{
+ sli_cmd_common_modify_eq_delay(sli4, sli4->bmbx.virt, eq, num_eq,
+ shift, delay_mult);
+
+ if (sli_bmbx_command(sli4)) {
+ efc_log_crit(sli4, "bootstrap mailbox write fail (MODIFY EQ DELAY)\n");
+ return -EIO;
+ }
+ if (sli_res_sli_config(sli4, sli4->bmbx.virt)) {
+ efc_log_err(sli4, "bad status MODIFY EQ DELAY\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
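+/*
+ * Allocate one resource of type @rtype from the extent bitmaps.  On
+ * success the resource ID is returned in @rid and the bitmap position in
+ * @index; -EIO is returned when the pool is exhausted or the type is not
+ * extent-backed.
+ */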
+int
+sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype,
+ u32 *rid, u32 *index)
+{
+ int rc = 0;
+ u32 size;
+ u32 ext_idx;
+ u32 item_idx;
+ u32 position;
+
+ *rid = U32_MAX;
+ *index = U32_MAX;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ position =
+ find_first_zero_bit(sli4->ext[rtype].use_map,
+ sli4->ext[rtype].map_size);
+ if (position >= sli4->ext[rtype].map_size) {
+ efc_log_err(sli4, "out of resource %d (alloc=%d)\n",
+ rtype, sli4->ext[rtype].n_alloc);
+ rc = -EIO;
+ break;
+ }
+ set_bit(position, sli4->ext[rtype].use_map);
+ *index = position;
+
+ size = sli4->ext[rtype].size;
+
+ ext_idx = *index / size;
+ item_idx = *index % size;
+
+ *rid = sli4->ext[rtype].base[ext_idx] + item_idx;
+
+ sli4->ext[rtype].n_alloc++;
+ break;
+ default:
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+int
+sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid)
+{
+ int rc = -EIO;
+ u32 x;
+ u32 size, *base;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ /*
+ * Figure out which extent contains the resource ID. I.e. find
+ * the extent such that
+ * extent->base <= resource ID < extent->base + extent->size
+ */
+ base = sli4->ext[rtype].base;
+ size = sli4->ext[rtype].size;
+
+ /*
+ * In the case of FW reset, this may be cleared
+ * but the force_free path will still attempt to
+ * free the resource. Prevent a NULL pointer access.
+ */
+ if (!base)
+ break;
+
+ for (x = 0; x < sli4->ext[rtype].number; x++) {
+ if ((rid < base[x] || (rid >= (base[x] + size))))
+ continue;
+
+ rid -= base[x];
+ clear_bit((x * size) + rid, sli4->ext[rtype].use_map);
+ rc = 0;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+int
+sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype)
+{
+ int rc = -EIO;
+ u32 i;
+
+ switch (rtype) {
+ case SLI4_RSRC_VFI:
+ case SLI4_RSRC_VPI:
+ case SLI4_RSRC_RPI:
+ case SLI4_RSRC_XRI:
+ for (i = 0; i < sli4->ext[rtype].map_size; i++)
+ clear_bit(i, sli4->ext[rtype].use_map);
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+int sli_raise_ue(struct sli4 *sli4, u8 dump)
+{
+ u32 val = 0;
+
+ if (dump == SLI4_FUNC_DESC_DUMP) {
+ val = SLI4_PORT_CTRL_FDD | SLI4_PORT_CTRL_IP;
+ writel(val, (sli4->reg[0] + SLI4_PORT_CTRL_REG));
+ } else {
+ val = SLI4_PHYDEV_CTRL_FRST;
+
+ if (dump == SLI4_CHIP_LEVEL_DUMP)
+ val |= SLI4_PHYDEV_CTRL_DD;
+ writel(val, (sli4->reg[0] + SLI4_PHYDEV_CTRL_REG));
+ }
+
+ return 0;
+}
+
+int sli_dump_is_ready(struct sli4 *sli4)
+{
+ int rc = SLI4_DUMP_READY_STATUS_NOT_READY;
+ u32 port_val;
+ u32 bmbx_val;
+
+ /*
+ * Ensure that the port is ready AND the mailbox is
+ * ready before signaling that the dump is ready to go.
+ */
+ port_val = sli_reg_read_status(sli4);
+ bmbx_val = readl(sli4->reg[0] + SLI4_BMBX_REG);
+
+ if ((bmbx_val & SLI4_BMBX_RDY) &&
+ (port_val & SLI4_PORT_STATUS_RDY)) {
+ if (port_val & SLI4_PORT_STATUS_DIP)
+ rc = SLI4_DUMP_READY_STATUS_DD_PRESENT;
+ else if (port_val & SLI4_PORT_STATUS_FDP)
+ rc = SLI4_DUMP_READY_STATUS_FDB_PRESENT;
+ }
+
+ return rc;
+}
+
+bool sli_reset_required(struct sli4 *sli4)
+{
+ u32 val;
+
+ val = sli_reg_read_status(sli4);
+ return (val & SLI4_PORT_STATUS_RN);
+}
+
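+/*
+ * Build a POST_SGL_PAGES request registering @xri_count XRIs starting at
+ * @xri.  Each page set carries the physical address of its page0 SGL page
+ * and, when @page1 is supplied, its page1 page.
+ */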
+int
+sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
+ u32 xri_count, struct efc_dma *page0[],
+ struct efc_dma *page1[], struct efc_dma *dma)
+{
+ struct sli4_rqst_post_sgl_pages *post = NULL;
+ u32 i;
+ __le32 req_len;
+
+ post = sli_config_cmd_init(sli4, buf,
+ SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma);
+ if (!post)
+ return -EIO;
+
+ /*
+ * Payload size: 4 bytes for xri_start + xri_count, plus two 64-bit
+ * physical addresses (page0 and page1) for each registered XRI.
+ */
+ req_len = cpu_to_le32(4 + (xri_count * (sizeof(uint64_t) * 2)));
+ sli_cmd_fill_hdr(&post->hdr, SLI4_OPC_POST_SGL_PAGES, SLI4_SUBSYSTEM_FC,
+ CMD_V0, req_len);
+ post->xri_start = cpu_to_le16(xri);
+ post->xri_count = cpu_to_le16(xri_count);
+
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page0_low =
+ cpu_to_le32(lower_32_bits(page0[i]->phys));
+ post->page_set[i].page0_high =
+ cpu_to_le32(upper_32_bits(page0[i]->phys));
+ }
+
+ if (page1) {
+ for (i = 0; i < xri_count; i++) {
+ post->page_set[i].page1_low =
+ cpu_to_le32(lower_32_bits(page1[i]->phys));
+ post->page_set[i].page1_high =
+ cpu_to_le32(upper_32_bits(page1[i]->phys));
+ }
+ }
+
+ return 0;
+}
+
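+/*
+ * Build a POST_HDR_TEMPLATES request for the header template pages in
+ * @dma, starting at RPI @rpi (U16_MAX means the base of the RPI extent).
+ * If more than 16 page descriptors are needed, the payload is placed in
+ * a newly allocated non-embedded buffer returned through @payload_dma.
+ */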
+int
+sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
+ u16 rpi, struct efc_dma *payload_dma)
+{
+ struct sli4_rqst_post_hdr_templates *req = NULL;
+ uintptr_t phys = 0;
+ u32 i = 0;
+ u32 page_count, payload_size;
+
+ page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
+
+ payload_size = ((sizeof(struct sli4_rqst_post_hdr_templates) +
+ (page_count * SZ_DMAADDR)) - sizeof(struct sli4_rqst_hdr));
+
+ if (page_count > 16) {
+ /*
+ * We can't fit more than 16 page descriptors into an embedded
+ * mbox command, so the payload has to be non-embedded.
+ */
+ payload_dma->size = payload_size;
+ payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
+ payload_dma->size,
+ &payload_dma->phys, GFP_DMA);
+ if (!payload_dma->virt) {
+ memset(payload_dma, 0, sizeof(struct efc_dma));
+ efc_log_err(sli4, "mbox payload memory allocation fail\n");
+ return -EIO;
+ }
+ req = sli_config_cmd_init(sli4, buf, payload_size, payload_dma);
+ } else {
+ req = sli_config_cmd_init(sli4, buf, payload_size, NULL);
+ }
+
+ if (!req)
+ return -EIO;
+
+ if (rpi == U16_MAX)
+ rpi = sli4->ext[SLI4_RSRC_RPI].base[0];
+
+ sli_cmd_fill_hdr(&req->hdr, SLI4_OPC_POST_HDR_TEMPLATES,
+ SLI4_SUBSYSTEM_FC, CMD_V0,
+ SLI4_RQST_PYLD_LEN(post_hdr_templates));
+
+ req->rpi_offset = cpu_to_le16(rpi);
+ req->page_count = cpu_to_le16(page_count);
+ phys = dma->phys;
+ for (i = 0; i < page_count; i++) {
+ req->page_descriptor[i].low = cpu_to_le32(lower_32_bits(phys));
+ req->page_descriptor[i].high = cpu_to_le32(upper_32_bits(phys));
+
+ phys += SLI_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+u32
+sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi)
+{
+ u32 bytes = 0;
+
+ /* Check if header templates are needed */
+ if (sli4->params.hdr_template_req)
+ /* round up to a page */
+ bytes = round_up(n_rpi * SLI4_HDR_TEMPLATE_SIZE, SLI_PAGE_SIZE);
+
+ return bytes;
+}
+
+const char *
+sli_fc_get_status_string(u32 status)
+{
+ static struct {
+ u32 code;
+ const char *label;
+ } lookup[] = {
+ {SLI4_FC_WCQE_STATUS_SUCCESS, "SUCCESS"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE, "FCP_RSP_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_REMOTE_STOP, "REMOTE_STOP"},
+ {SLI4_FC_WCQE_STATUS_LOCAL_REJECT, "LOCAL_REJECT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_RJT, "NPORT_RJT"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_RJT, "FABRIC_RJT"},
+ {SLI4_FC_WCQE_STATUS_NPORT_BSY, "NPORT_BSY"},
+ {SLI4_FC_WCQE_STATUS_FABRIC_BSY, "FABRIC_BSY"},
+ {SLI4_FC_WCQE_STATUS_LS_RJT, "LS_RJT"},
+ {SLI4_FC_WCQE_STATUS_CMD_REJECT, "CMD_REJECT"},
+ {SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK, "FCP_TGT_LENCHECK"},
+ {SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED, "BUF_LEN_EXCEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
+ "RQ_INSUFF_BUF_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC, "RQ_INSUFF_FRM_DESC"},
+ {SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE, "RQ_DMA_FAILURE"},
+ {SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE, "FCP_RSP_TRUNCATE"},
+ {SLI4_FC_WCQE_STATUS_DI_ERROR, "DI_ERROR"},
+ {SLI4_FC_WCQE_STATUS_BA_RJT, "BA_RJT"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
+ "RQ_INSUFF_XRI_NEEDED"},
+ {SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC, "INSUFF_XRI_DISC"},
+ {SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT, "RX_ERROR_DETECT"},
+ {SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST, "RX_ABORT_REQUEST"},
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(lookup); i++) {
+ if (status == lookup[i].code)
+ return lookup[i].label;
+ }
+ return "unknown";
+}
diff --git a/drivers/scsi/elx/libefc_sli/sli4.h b/drivers/scsi/elx/libefc_sli/sli4.h
new file mode 100644
index 000000000000..ee2a9e65a88d
--- /dev/null
+++ b/drivers/scsi/elx/libefc_sli/sli4.h
@@ -0,0 +1,4132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ */
+
+/*
+ * All common SLI-4 structures and function prototypes.
+ */
+
+#ifndef _SLI4_H
+#define _SLI4_H
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "scsi/fc/fc_els.h"
+#include "scsi/fc/fc_fs.h"
+#include "../include/efc_common.h"
+
+/*************************************************************************
+ * Common SLI-4 register offsets and field definitions
+ */
+
+/* SLI_INTF - SLI Interface Definition Register */
+#define SLI4_INTF_REG 0x0058
+enum sli4_intf {
+ SLI4_INTF_REV_SHIFT = 4,
+ SLI4_INTF_REV_MASK = 0xf0,
+
+ SLI4_INTF_REV_S3 = 0x30,
+ SLI4_INTF_REV_S4 = 0x40,
+
+ SLI4_INTF_FAMILY_SHIFT = 8,
+ SLI4_INTF_FAMILY_MASK = 0x0f00,
+
+ SLI4_FAMILY_CHECK_ASIC_TYPE = 0x0f00,
+
+ SLI4_INTF_IF_TYPE_SHIFT = 12,
+ SLI4_INTF_IF_TYPE_MASK = 0xf000,
+
+ SLI4_INTF_IF_TYPE_2 = 0x2000,
+ SLI4_INTF_IF_TYPE_6 = 0x6000,
+
+ SLI4_INTF_VALID_SHIFT = 29,
+ SLI4_INTF_VALID_MASK = 0xe0000000,
+
+ SLI4_INTF_VALID_VALUE = 0xc0000000,
+};
+
+/* ASIC_ID - SLI ASIC Type and Revision Register */
+#define SLI4_ASIC_ID_REG 0x009c
+enum sli4_asic {
+ SLI4_ASIC_GEN_SHIFT = 8,
+ SLI4_ASIC_GEN_MASK = 0xff00,
+ SLI4_ASIC_GEN_5 = 0x0b00,
+ SLI4_ASIC_GEN_6 = 0x0c00,
+ SLI4_ASIC_GEN_7 = 0x0d00,
+};
+
+enum sli4_acic_revisions {
+ SLI4_ASIC_REV_A0 = 0x00,
+ SLI4_ASIC_REV_A1 = 0x01,
+ SLI4_ASIC_REV_A2 = 0x02,
+ SLI4_ASIC_REV_A3 = 0x03,
+ SLI4_ASIC_REV_B0 = 0x10,
+ SLI4_ASIC_REV_B1 = 0x11,
+ SLI4_ASIC_REV_B2 = 0x12,
+ SLI4_ASIC_REV_C0 = 0x20,
+ SLI4_ASIC_REV_C1 = 0x21,
+ SLI4_ASIC_REV_C2 = 0x22,
+ SLI4_ASIC_REV_D0 = 0x30,
+};
+
+struct sli4_asic_entry_t {
+ u32 rev_id;
+ u32 family;
+};
+
+/* BMBX - Bootstrap Mailbox Register */
+#define SLI4_BMBX_REG 0x0160
+enum sli4_bmbx {
+ SLI4_BMBX_MASK_HI = 0x3,
+ SLI4_BMBX_MASK_LO = 0xf,
+ SLI4_BMBX_RDY = 1 << 0,
+ SLI4_BMBX_HI = 1 << 1,
+ SLI4_BMBX_SIZE = 256,
+};
+
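+/*
+ * Split the bootstrap mailbox physical address into the two doorbell
+ * writes: the high write carries address bits 63:34 plus the HI flag,
+ * the low write carries bits 33:4 shifted up above the RDY/HI control
+ * bits.
+ */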
+static inline u32
+sli_bmbx_write_hi(u64 addr) {
+ u32 val;
+
+ val = upper_32_bits(addr) & ~SLI4_BMBX_MASK_HI;
+ val |= SLI4_BMBX_HI;
+
+ return val;
+}
+
+static inline u32
+sli_bmbx_write_lo(u64 addr) {
+ u32 val;
+
+ val = (upper_32_bits(addr) & SLI4_BMBX_MASK_HI) << 30;
+ val |= ((addr) & ~SLI4_BMBX_MASK_LO) >> 2;
+
+ return val;
+}
+
+/* SLIPORT_CONTROL - SLI Port Control Register */
+#define SLI4_PORT_CTRL_REG 0x0408
+enum sli4_port_ctrl {
+ SLI4_PORT_CTRL_IP = 1u << 27,
+ SLI4_PORT_CTRL_IDIS = 1u << 22,
+ SLI4_PORT_CTRL_FDD = 1u << 31,
+};
+
+/* SLI4_SLIPORT_ERROR - SLI Port Error Register */
+#define SLI4_PORT_ERROR1 0x040c
+#define SLI4_PORT_ERROR2 0x0410
+
+/* EQCQ_DOORBELL - EQ and CQ Doorbell Register */
+#define SLI4_EQCQ_DB_REG 0x120
+enum sli4_eqcq_e {
+ SLI4_EQ_ID_LO_MASK = 0x01ff,
+
+ SLI4_CQ_ID_LO_MASK = 0x03ff,
+
+ SLI4_EQCQ_CI_EQ = 0x0200,
+
+ SLI4_EQCQ_QT_EQ = 0x00000400,
+ SLI4_EQCQ_QT_CQ = 0x00000000,
+
+ SLI4_EQCQ_ID_HI_SHIFT = 11,
+ SLI4_EQCQ_ID_HI_MASK = 0xf800,
+
+ SLI4_EQCQ_NUM_SHIFT = 16,
+ SLI4_EQCQ_NUM_MASK = 0x1fff0000,
+
+ SLI4_EQCQ_ARM = 0x20000000,
+ SLI4_EQCQ_UNARM = 0x00000000,
+};
+
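+/*
+ * Compose an EQ doorbell word: the queue id is split across the low and
+ * high id fields and combined with the number of popped entries, the EQ
+ * queue-type bit, the consumer-index bit and the optional arm bit.
+ */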
+static inline u32
+sli_format_eq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = (id & SLI4_EQ_ID_LO_MASK) | SLI4_EQCQ_QT_EQ;
+ reg |= (((id) >> 9) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK;
+ reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK;
+ reg |= arm | SLI4_EQCQ_CI_EQ;
+
+ return reg;
+}
+
+static inline u32
+sli_format_cq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = ((id) & SLI4_CQ_ID_LO_MASK) | SLI4_EQCQ_QT_CQ;
+ reg |= (((id) >> 10) << SLI4_EQCQ_ID_HI_SHIFT) & SLI4_EQCQ_ID_HI_MASK;
+ reg |= ((num_popped) << SLI4_EQCQ_NUM_SHIFT) & SLI4_EQCQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* EQ_DOORBELL - EQ Doorbell Register for IF_TYPE = 6 */
+#define SLI4_IF6_EQ_DB_REG 0x120
+enum sli4_eq_e {
+ SLI4_IF6_EQ_ID_MASK = 0x0fff,
+
+ SLI4_IF6_EQ_NUM_SHIFT = 16,
+ SLI4_IF6_EQ_NUM_MASK = 0x1fff0000,
+};
+
+static inline u32
+sli_format_if6_eq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = id & SLI4_IF6_EQ_ID_MASK;
+ reg |= (num_popped << SLI4_IF6_EQ_NUM_SHIFT) & SLI4_IF6_EQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* CQ_DOORBELL - CQ Doorbell Register for IF_TYPE = 6 */
+#define SLI4_IF6_CQ_DB_REG 0xc0
+enum sli4_cq_e {
+ SLI4_IF6_CQ_ID_MASK = 0xffff,
+
+ SLI4_IF6_CQ_NUM_SHIFT = 16,
+ SLI4_IF6_CQ_NUM_MASK = 0x1fff0000,
+};
+
+static inline u32
+sli_format_if6_cq_db_data(u16 num_popped, u16 id, u32 arm) {
+ u32 reg;
+
+ reg = id & SLI4_IF6_CQ_ID_MASK;
+ reg |= ((num_popped) << SLI4_IF6_CQ_NUM_SHIFT) & SLI4_IF6_CQ_NUM_MASK;
+ reg |= arm;
+
+ return reg;
+}
+
+/* MQ_DOORBELL - MQ Doorbell Register */
+#define SLI4_MQ_DB_REG 0x0140
+#define SLI4_IF6_MQ_DB_REG 0x0160
+enum sli4_mq_e {
+ SLI4_MQ_ID_MASK = 0xffff,
+
+ SLI4_MQ_NUM_SHIFT = 16,
+ SLI4_MQ_NUM_MASK = 0x3fff0000,
+};
+
+static inline u32
+sli_format_mq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_MQ_ID_MASK;
+ reg |= (1 << SLI4_MQ_NUM_SHIFT) & SLI4_MQ_NUM_MASK;
+
+ return reg;
+}
+
+/* RQ_DOORBELL - RQ Doorbell Register */
+#define SLI4_RQ_DB_REG 0x0a0
+#define SLI4_IF6_RQ_DB_REG 0x0080
+enum sli4_rq_e {
+ SLI4_RQ_DB_ID_MASK = 0xffff,
+
+ SLI4_RQ_DB_NUM_SHIFT = 16,
+ SLI4_RQ_DB_NUM_MASK = 0x3fff0000,
+};
+
+static inline u32
+sli_format_rq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_RQ_DB_ID_MASK;
+ reg |= (1 << SLI4_RQ_DB_NUM_SHIFT) & SLI4_RQ_DB_NUM_MASK;
+
+ return reg;
+}
+
+/* WQ_DOORBELL - WQ Doorbell Register */
+#define SLI4_IO_WQ_DB_REG 0x040
+#define SLI4_IF6_WQ_DB_REG 0x040
+enum sli4_wq_e {
+ SLI4_WQ_ID_MASK = 0xffff,
+
+ SLI4_WQ_IDX_SHIFT = 16,
+ SLI4_WQ_IDX_MASK = 0xff0000,
+
+ SLI4_WQ_NUM_SHIFT = 24,
+ SLI4_WQ_NUM_MASK = 0x0ff00000,
+};
+
+static inline u32
+sli_format_wq_db_data(u16 id) {
+ u32 reg;
+
+ reg = id & SLI4_WQ_ID_MASK;
+ reg |= (1 << SLI4_WQ_NUM_SHIFT) & SLI4_WQ_NUM_MASK;
+
+ return reg;
+}
+
+/* SLIPORT_STATUS - SLI Port Status Register */
+#define SLI4_PORT_STATUS_REGOFF 0x0404
+enum sli4_port_status {
+ SLI4_PORT_STATUS_FDP = 1u << 21,
+ SLI4_PORT_STATUS_RDY = 1u << 23,
+ SLI4_PORT_STATUS_RN = 1u << 24,
+ SLI4_PORT_STATUS_DIP = 1u << 25,
+ SLI4_PORT_STATUS_OTI = 1u << 29,
+ SLI4_PORT_STATUS_ERR = 1u << 31,
+};
+
+#define SLI4_PHYDEV_CTRL_REG 0x0414
+#define SLI4_PHYDEV_CTRL_FRST (1 << 1)
+#define SLI4_PHYDEV_CTRL_DD (1 << 2)
+
+/* Register name enums */
+enum sli4_regname_en {
+ SLI4_REG_BMBX,
+ SLI4_REG_EQ_DOORBELL,
+ SLI4_REG_CQ_DOORBELL,
+ SLI4_REG_RQ_DOORBELL,
+ SLI4_REG_IO_WQ_DOORBELL,
+ SLI4_REG_MQ_DOORBELL,
+ SLI4_REG_PHYSDEV_CONTROL,
+ SLI4_REG_PORT_CONTROL,
+ SLI4_REG_PORT_ERROR1,
+ SLI4_REG_PORT_ERROR2,
+ SLI4_REG_PORT_SEMAPHORE,
+ SLI4_REG_PORT_STATUS,
+ SLI4_REG_UNKWOWN /* must be last */
+};
+
+struct sli4_reg {
+ u32 rset;
+ u32 off;
+};
+
+struct sli4_dmaaddr {
+ __le32 low;
+ __le32 high;
+};
+
+/*
+ * A 3-word buffer descriptor entry: the address occupies the first
+ * two words and the length the last word.
+ */
+struct sli4_bufptr {
+ struct sli4_dmaaddr addr;
+ __le32 length;
+};
+
+/* Buffer Descriptor Entry (BDE) */
+enum sli4_bde_e {
+ SLI4_BDE_LEN_MASK = 0x00ffffff,
+ SLI4_BDE_TYPE_MASK = 0xff000000,
+};
+
+struct sli4_bde {
+ __le32 bde_type_buflen;
+ union {
+ struct sli4_dmaaddr data;
+ struct {
+ __le32 offset;
+ __le32 rsvd2;
+ } imm;
+ struct sli4_dmaaddr blp;
+ } u;
+};
+
+/* Buffer Descriptors */
+enum sli4_bde_type {
+ SLI4_BDE_TYPE_SHIFT = 24,
+ SLI4_BDE_TYPE_64 = 0x00, /* Generic 64-bit data */
+ SLI4_BDE_TYPE_IMM = 0x01, /* Immediate data */
+ SLI4_BDE_TYPE_BLP = 0x40, /* Buffer List Pointer */
+};
+
+#define SLI4_BDE_TYPE_VAL(type) \
+ (SLI4_BDE_TYPE_##type << SLI4_BDE_TYPE_SHIFT)
+
+/* Scatter-Gather Entry (SGE) */
+#define SLI4_SGE_MAX_RESERVED 3
+
+enum sli4_sge_type {
+ /* DW2 */
+ SLI4_SGE_DATA_OFFSET_MASK = 0x07ffffff,
+ /*DW2W1*/
+ SLI4_SGE_TYPE_SHIFT = 27,
+ SLI4_SGE_TYPE_MASK = 0x78000000,
+ /*SGE Types*/
+ SLI4_SGE_TYPE_DATA = 0x00,
+ SLI4_SGE_TYPE_DIF = 0x04, /* Data Integrity Field */
+ SLI4_SGE_TYPE_LSP = 0x05, /* List Segment Pointer */
+ SLI4_SGE_TYPE_PEDIF = 0x06, /* Post Encryption Engine DIF */
+ SLI4_SGE_TYPE_PESEED = 0x07, /* Post Encryption DIF Seed */
+ SLI4_SGE_TYPE_DISEED = 0x08, /* DIF Seed */
+ SLI4_SGE_TYPE_ENC = 0x09, /* Encryption */
+ SLI4_SGE_TYPE_ATM = 0x0a, /* DIF Application Tag Mask */
+ SLI4_SGE_TYPE_SKIP = 0x0c, /* SKIP */
+
+ SLI4_SGE_LAST = 1u << 31,
+};
+
+struct sli4_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 buffer_length;
+};
+
+/* T10 DIF Scatter-Gather Entry (SGE) */
+struct sli4_dif_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 rsvd12;
+};
+
+/* Data Integrity Seed (DISEED) SGE */
+enum sli4_diseed_sge_flags {
+ /* DW2W1 */
+ SLI4_DISEED_SGE_HS = 1 << 2,
+ SLI4_DISEED_SGE_WS = 1 << 3,
+ SLI4_DISEED_SGE_IC = 1 << 4,
+ SLI4_DISEED_SGE_ICS = 1 << 5,
+ SLI4_DISEED_SGE_ATRT = 1 << 6,
+ SLI4_DISEED_SGE_AT = 1 << 7,
+ SLI4_DISEED_SGE_FAT = 1 << 8,
+ SLI4_DISEED_SGE_NA = 1 << 9,
+ SLI4_DISEED_SGE_HI = 1 << 10,
+
+ /* DW3W1 */
+ SLI4_DISEED_SGE_BS_MASK = 0x0007,
+ SLI4_DISEED_SGE_AI = 1 << 3,
+ SLI4_DISEED_SGE_ME = 1 << 4,
+ SLI4_DISEED_SGE_RE = 1 << 5,
+ SLI4_DISEED_SGE_CE = 1 << 6,
+ SLI4_DISEED_SGE_NR = 1 << 7,
+
+ SLI4_DISEED_SGE_OP_RX_SHIFT = 8,
+ SLI4_DISEED_SGE_OP_RX_MASK = 0x0f00,
+ SLI4_DISEED_SGE_OP_TX_SHIFT = 12,
+ SLI4_DISEED_SGE_OP_TX_MASK = 0xf000,
+};
+
+/* Opcode values */
+enum sli4_diseed_sge_opcodes {
+ SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_NODIF,
+ SLI4_DISEED_SGE_OP_IN_NODIF_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_NODIF,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CRC_OUT_CSUM,
+ SLI4_DISEED_SGE_OP_IN_CSUM_OUT_CRC,
+ SLI4_DISEED_SGE_OP_IN_RAW_OUT_RAW,
+};
+
+#define SLI4_DISEED_SGE_OP_RX_VALUE(stype) \
+ (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_RX_SHIFT)
+#define SLI4_DISEED_SGE_OP_TX_VALUE(stype) \
+ (SLI4_DISEED_SGE_OP_##stype << SLI4_DISEED_SGE_OP_TX_SHIFT)
+
+struct sli4_diseed_sge {
+ __le32 ref_tag_cmp;
+ __le32 ref_tag_repl;
+ __le16 app_tag_repl;
+ __le16 dw2w1_flags;
+ __le16 app_tag_cmp;
+ __le16 dw3w1_flags;
+};
+
+/* List Segment Pointer Scatter-Gather Entry (SGE) */
+#define SLI4_LSP_SGE_SEGLEN 0x00ffffff
+
+struct sli4_lsp_sge {
+ __le32 buffer_address_high;
+ __le32 buffer_address_low;
+ __le32 dw2_flags;
+ __le32 dw3_seglen;
+};
+
+enum sli4_eqe_e {
+ SLI4_EQE_VALID = 1,
+ SLI4_EQE_MJCODE = 0xe,
+ SLI4_EQE_MNCODE = 0xfff0,
+};
+
+struct sli4_eqe {
+ __le16 dw0w0_flags;
+ __le16 resource_id;
+};
+
+#define SLI4_MAJOR_CODE_STANDARD 0
+#define SLI4_MAJOR_CODE_SENTINEL 1
+
+/* Sentinel EQE indicating the EQ is full */
+#define SLI4_EQE_STATUS_EQ_FULL 2
+
+enum sli4_mcqe_e {
+ SLI4_MCQE_CONSUMED = 1u << 27,
+ SLI4_MCQE_COMPLETED = 1u << 28,
+ SLI4_MCQE_AE = 1u << 30,
+ SLI4_MCQE_VALID = 1u << 31,
+};
+
+/* Entry was consumed but not completed */
+#define SLI4_MCQE_STATUS_NOT_COMPLETED -2
+
+struct sli4_mcqe {
+ __le16 completion_status;
+ __le16 extended_status;
+ __le32 mqe_tag_low;
+ __le32 mqe_tag_high;
+ __le32 dw3_flags;
+};
+
+enum sli4_acqe_e {
+ SLI4_ACQE_AE = 1 << 6, /* async event - this is an ACQE */
+ SLI4_ACQE_VAL = 1 << 7, /* valid - contents of CQE are valid */
+};
+
+struct sli4_acqe {
+ __le32 event_data[3];
+ u8 rsvd12;
+ u8 event_code;
+ u8 event_type;
+ u8 ae_val;
+};
+
+enum sli4_acqe_event_code {
+ SLI4_ACQE_EVENT_CODE_LINK_STATE = 0x01,
+ SLI4_ACQE_EVENT_CODE_FIP = 0x02,
+ SLI4_ACQE_EVENT_CODE_DCBX = 0x03,
+ SLI4_ACQE_EVENT_CODE_ISCSI = 0x04,
+ SLI4_ACQE_EVENT_CODE_GRP_5 = 0x05,
+ SLI4_ACQE_EVENT_CODE_FC_LINK_EVENT = 0x10,
+ SLI4_ACQE_EVENT_CODE_SLI_PORT_EVENT = 0x11,
+ SLI4_ACQE_EVENT_CODE_VF_EVENT = 0x12,
+ SLI4_ACQE_EVENT_CODE_MR_EVENT = 0x13,
+};
+
+enum sli4_qtype {
+ SLI4_QTYPE_EQ,
+ SLI4_QTYPE_CQ,
+ SLI4_QTYPE_MQ,
+ SLI4_QTYPE_WQ,
+ SLI4_QTYPE_RQ,
+ SLI4_QTYPE_MAX, /* must be last */
+};
+
+#define SLI4_USER_MQ_COUNT 1
+#define SLI4_MAX_CQ_SET_COUNT 16
+#define SLI4_MAX_RQ_SET_COUNT 16
+
+enum sli4_qentry {
+ SLI4_QENTRY_ASYNC,
+ SLI4_QENTRY_MQ,
+ SLI4_QENTRY_RQ,
+ SLI4_QENTRY_WQ,
+ SLI4_QENTRY_WQ_RELEASE,
+ SLI4_QENTRY_OPT_WRITE_CMD,
+ SLI4_QENTRY_OPT_WRITE_DATA,
+ SLI4_QENTRY_XABT,
+ SLI4_QENTRY_MAX /* must be last */
+};
+
+enum sli4_queue_flags {
+ SLI4_QUEUE_FLAG_MQ = 1 << 0, /* CQ has MQ/Async completion */
+ SLI4_QUEUE_FLAG_HDR = 1 << 1, /* RQ for packet headers */
+ SLI4_QUEUE_FLAG_RQBATCH = 1 << 2, /* RQ index increment by 8 */
+};
+
+/* Generic Command Request header */
+enum sli4_cmd_version {
+ CMD_V0,
+ CMD_V1,
+ CMD_V2,
+};
+
+struct sli4_rqst_hdr {
+ u8 opcode;
+ u8 subsystem;
+ __le16 rsvd2;
+ __le32 timeout;
+ __le32 request_length;
+ __le32 dw3_version;
+};
+
+/* Generic Command Response header */
+struct sli4_rsp_hdr {
+ u8 opcode;
+ u8 subsystem;
+ __le16 rsvd2;
+ u8 status;
+ u8 additional_status;
+ __le16 rsvd6;
+ __le32 response_length;
+ __le32 actual_response_length;
+};
+
+#define SLI4_QUEUE_RQ_BATCH 8
+
+#define SZ_DMAADDR sizeof(struct sli4_dmaaddr)
+#define SLI4_RQST_CMDSZ(stype) sizeof(struct sli4_rqst_##stype)
+
+#define SLI4_RQST_PYLD_LEN(stype) \
+ cpu_to_le32(sizeof(struct sli4_rqst_##stype) - \
+ sizeof(struct sli4_rqst_hdr))
+
+#define SLI4_RQST_PYLD_LEN_VAR(stype, varpyld) \
+ cpu_to_le32((sizeof(struct sli4_rqst_##stype) + \
+ varpyld) - sizeof(struct sli4_rqst_hdr))
+
+#define SLI4_CFG_PYLD_LENGTH(stype) \
+ max(sizeof(struct sli4_rqst_##stype), \
+ sizeof(struct sli4_rsp_##stype))
+
+enum sli4_create_cqv2_e {
+ /* DW5_flags values */
+ SLI4_CREATE_CQV2_CLSWM_MASK = 0x00003000,
+ SLI4_CREATE_CQV2_NODELAY = 0x00004000,
+ SLI4_CREATE_CQV2_AUTOVALID = 0x00008000,
+ SLI4_CREATE_CQV2_CQECNT_MASK = 0x18000000,
+ SLI4_CREATE_CQV2_VALID = 0x20000000,
+ SLI4_CREATE_CQV2_EVT = 0x80000000,
+ /* DW6W1_flags values */
+ SLI4_CREATE_CQV2_ARM = 0x8000,
+};
+
+struct sli4_rqst_cmn_create_cq_v2 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 page_size;
+ u8 rsvd19;
+ __le32 dw5_flags;
+ __le16 eq_id;
+ __le16 dw6w1_arm;
+ __le16 cqe_count;
+ __le16 rsvd30;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_phys_addr[];
+};
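+
+/*
+ * Sketch only (hypothetical helper name): a request carrying a trailing
+ * page list would size its header with SLI4_RQST_PYLD_LEN_VAR, adding one
+ * sli4_dmaaddr per page on top of the fixed command size.
+ */
+static inline void
+sli_cq_v2_set_rqst_len(struct sli4_rqst_cmn_create_cq_v2 *req, u32 num_pages)
+{
+ req->hdr.request_length =
+ SLI4_RQST_PYLD_LEN_VAR(cmn_create_cq_v2, num_pages * SZ_DMAADDR);
+}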
+
+enum sli4_create_cqset_e {
+ /* DW5_flags values */
+ SLI4_CREATE_CQSETV0_CLSWM_MASK = 0x00003000,
+ SLI4_CREATE_CQSETV0_NODELAY = 0x00004000,
+ SLI4_CREATE_CQSETV0_AUTOVALID = 0x00008000,
+ SLI4_CREATE_CQSETV0_CQECNT_MASK = 0x18000000,
+ SLI4_CREATE_CQSETV0_VALID = 0x20000000,
+ SLI4_CREATE_CQSETV0_EVT = 0x80000000,
+ /* DW5W1_flags values */
+ SLI4_CREATE_CQSETV0_CQE_COUNT = 0x7fff,
+ SLI4_CREATE_CQSETV0_ARM = 0x8000,
+};
+
+struct sli4_rqst_cmn_create_cq_set_v0 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 page_size;
+ u8 rsvd19;
+ __le32 dw5_flags;
+ __le16 num_cq_req;
+ __le16 dw6w1_flags;
+ __le16 eq_id[16];
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+/* CQE count */
+enum sli4_cq_cnt {
+ SLI4_CQ_CNT_256,
+ SLI4_CQ_CNT_512,
+ SLI4_CQ_CNT_1024,
+ SLI4_CQ_CNT_LARGE,
+};
+
+#define SLI4_CQ_CNT_SHIFT 27
+#define SLI4_CQ_CNT_VAL(type) (SLI4_CQ_CNT_##type << SLI4_CQ_CNT_SHIFT)
+
+#define SLI4_CQE_BYTES (4 * sizeof(u32))
+
+#define SLI4_CREATE_CQV2_MAX_PAGES 8
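+
+/*
+ * Hypothetical usage sketch: SLI4_CQ_CNT_VAL() token-pastes the count name,
+ * so programming a 1024-entry CQ into DW5 of the v2 create request could
+ * look like this (the helper name is illustrative only).
+ */
+static inline void
+sli_cq_v2_encode_cnt_1024(struct sli4_rqst_cmn_create_cq_v2 *req)
+{
+ u32 dw5 = le32_to_cpu(req->dw5_flags);
+
+ dw5 &= ~SLI4_CREATE_CQV2_CQECNT_MASK;
+ dw5 |= SLI4_CQ_CNT_VAL(1024);
+ req->dw5_flags = cpu_to_le32(dw5);
+}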
+
+/* Generic Common Create EQ/CQ/MQ/WQ/RQ Queue completion */
+struct sli4_rsp_cmn_create_queue {
+ struct sli4_rsp_hdr hdr;
+ __le16 q_id;
+ u8 rsvd18;
+ u8 ulp;
+ __le32 db_offset;
+ __le16 db_rs;
+ __le16 db_fmt;
+};
+
+struct sli4_rsp_cmn_create_queue_set {
+ struct sli4_rsp_hdr hdr;
+ __le16 q_id;
+ __le16 num_q_allocated;
+};
+
+/* Common Destroy Queue */
+struct sli4_rqst_cmn_destroy_q {
+ struct sli4_rqst_hdr hdr;
+ __le16 q_id;
+ __le16 rsvd;
+};
+
+struct sli4_rsp_cmn_destroy_q {
+ struct sli4_rsp_hdr hdr;
+};
+
+/* Modify the delay multiplier for EQs */
+struct sli4_eqdelay_rec {
+ __le32 eq_id;
+ __le32 phase;
+ __le32 delay_multiplier;
+};
+
+struct sli4_rqst_cmn_modify_eq_delay {
+ struct sli4_rqst_hdr hdr;
+ __le32 num_eq;
+ struct sli4_eqdelay_rec eq_delay_record[8];
+};
+
+struct sli4_rsp_cmn_modify_eq_delay {
+ struct sli4_rsp_hdr hdr;
+};
+
+enum sli4_create_eq_e {
+ /* DW5 */
+ SLI4_CREATE_EQ_AUTOVALID = 1u << 28,
+ SLI4_CREATE_EQ_VALID = 1u << 29,
+ SLI4_CREATE_EQ_EQESZ = 1u << 31,
+ /* DW6 */
+ SLI4_CREATE_EQ_COUNT = 7 << 26,
+ SLI4_CREATE_EQ_ARM = 1u << 31,
+ /* DW7 */
+ SLI4_CREATE_EQ_DELAYMULTI_SHIFT = 13,
+ SLI4_CREATE_EQ_DELAYMULTI_MASK = 0x007fe000,
+ SLI4_CREATE_EQ_DELAYMULTI = 0x00040000,
+};
+
+struct sli4_rqst_cmn_create_eq {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 rsvd18;
+ __le32 dw5_flags;
+ __le32 dw6_flags;
+ __le32 dw7_delaymulti;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_address[8];
+};
+
+struct sli4_rsp_cmn_create_eq {
+ struct sli4_rsp_cmn_create_queue q_rsp;
+};
+
+/* EQ count */
+enum sli4_eq_cnt {
+ SLI4_EQ_CNT_256,
+ SLI4_EQ_CNT_512,
+ SLI4_EQ_CNT_1024,
+ SLI4_EQ_CNT_2048,
+ SLI4_EQ_CNT_4096 = 3,
+};
+
+#define SLI4_EQ_CNT_SHIFT 26
+#define SLI4_EQ_CNT_VAL(type) (SLI4_EQ_CNT_##type << SLI4_EQ_CNT_SHIFT)
+
+#define SLI4_EQE_SIZE_4 0
+#define SLI4_EQE_SIZE_16 1
+
+/* Create a Mailbox Queue; accommodate v0 and v1 forms. */
+enum sli4_create_mq_flags {
+ /* DW6W1 */
+ SLI4_CREATE_MQEXT_RINGSIZE = 0xf,
+ SLI4_CREATE_MQEXT_CQID_SHIFT = 6,
+ SLI4_CREATE_MQEXT_CQIDV0_MASK = 0xffc0,
+ /* DW7 */
+ SLI4_CREATE_MQEXT_VAL = 1u << 31,
+ /* DW8 */
+ SLI4_CREATE_MQEXT_ACQV = 1u << 0,
+ SLI4_CREATE_MQEXT_ASYNC_CQIDV0 = 0x7fe,
+};
+
+struct sli4_rqst_cmn_create_mq_ext {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 cq_id_v1;
+ __le32 async_event_bitmap;
+ __le16 async_cq_id_v1;
+ __le16 dw6w1_flags;
+ __le32 dw7_val;
+ __le32 dw8_flags;
+ __le32 rsvd36;
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+struct sli4_rsp_cmn_create_mq_ext {
+ struct sli4_rsp_cmn_create_queue q_rsp;
+};
+
+enum sli4_mqe_size {
+ SLI4_MQE_SIZE_16 = 0x05,
+ SLI4_MQE_SIZE_32,
+ SLI4_MQE_SIZE_64,
+ SLI4_MQE_SIZE_128,
+};
+
+enum sli4_async_evt {
+ SLI4_ASYNC_EVT_LINK_STATE = 1 << 1,
+ SLI4_ASYNC_EVT_FIP = 1 << 2,
+ SLI4_ASYNC_EVT_GRP5 = 1 << 5,
+ SLI4_ASYNC_EVT_FC = 1 << 16,
+ SLI4_ASYNC_EVT_SLI_PORT = 1 << 17,
+};
+
+#define SLI4_ASYNC_EVT_FC_ALL \
+ (SLI4_ASYNC_EVT_LINK_STATE | \
+ SLI4_ASYNC_EVT_FIP | \
+ SLI4_ASYNC_EVT_GRP5 | \
+ SLI4_ASYNC_EVT_FC | \
+ SLI4_ASYNC_EVT_SLI_PORT)
+
+/* Create a Completion Queue. */
+struct sli4_rqst_cmn_create_cq_v0 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 rsvd18;
+ __le32 dw5_flags;
+ __le32 dw6_flags;
+ __le32 rsvd28;
+ __le32 rsvd32;
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+enum sli4_create_rq_e {
+ SLI4_RQ_CREATE_DUA = 0x1,
+ SLI4_RQ_CREATE_BQU = 0x2,
+
+ SLI4_RQE_SIZE = 8,
+ SLI4_RQE_SIZE_8 = 0x2,
+ SLI4_RQE_SIZE_16 = 0x3,
+ SLI4_RQE_SIZE_32 = 0x4,
+ SLI4_RQE_SIZE_64 = 0x5,
+ SLI4_RQE_SIZE_128 = 0x6,
+
+ SLI4_RQ_PAGE_SIZE_4096 = 0x1,
+ SLI4_RQ_PAGE_SIZE_8192 = 0x2,
+ SLI4_RQ_PAGE_SIZE_16384 = 0x4,
+ SLI4_RQ_PAGE_SIZE_32768 = 0x8,
+ SLI4_RQ_PAGE_SIZE_65536 = 0x10,
+
+ SLI4_RQ_CREATE_V0_MAX_PAGES = 8,
+ SLI4_RQ_CREATE_V0_MIN_BUF_SIZE = 128,
+ SLI4_RQ_CREATE_V0_MAX_BUF_SIZE = 2048,
+};
+
+struct sli4_rqst_rq_create {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 dua_bqu_byte;
+ u8 ulp;
+ __le16 rsvd16;
+ u8 rqe_count_byte;
+ u8 rsvd19;
+ __le32 rsvd20;
+ __le16 buffer_size;
+ __le16 cq_id;
+ __le32 rsvd28;
+ struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V0_MAX_PAGES];
+};
+
+struct sli4_rsp_rq_create {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+enum sli4_create_rqv1_e {
+ SLI4_RQ_CREATE_V1_DNB = 0x80,
+ SLI4_RQ_CREATE_V1_MAX_PAGES = 8,
+ SLI4_RQ_CREATE_V1_MIN_BUF_SIZE = 64,
+ SLI4_RQ_CREATE_V1_MAX_BUF_SIZE = 2048,
+};
+
+struct sli4_rqst_rq_create_v1 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 rsvd14;
+ u8 dim_dfd_dnb;
+ u8 page_size;
+ u8 rqe_size_byte;
+ __le16 rqe_count;
+ __le32 rsvd20;
+ __le16 rsvd24;
+ __le16 cq_id;
+ __le32 buffer_size;
+ struct sli4_dmaaddr page_phys_addr[SLI4_RQ_CREATE_V1_MAX_PAGES];
+};
+
+struct sli4_rsp_rq_create_v1 {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+#define SLI4_RQCREATEV2_DNB 0x80
+
+struct sli4_rqst_rq_create_v2 {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ u8 rq_count;
+ u8 dim_dfd_dnb;
+ u8 page_size;
+ u8 rqe_size_byte;
+ __le16 rqe_count;
+ __le16 hdr_buffer_size;
+ __le16 payload_buffer_size;
+ __le16 base_cq_id;
+ __le16 rsvd26;
+ __le32 rsvd42;
+ struct sli4_dmaaddr page_phys_addr[];
+};
+
+struct sli4_rsp_rq_create_v2 {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+#define SLI4_CQE_CODE_OFFSET 14
+
+enum sli4_cqe_code {
+ SLI4_CQE_CODE_WORK_REQUEST_COMPLETION = 0x01,
+ SLI4_CQE_CODE_RELEASE_WQE,
+ SLI4_CQE_CODE_RSVD,
+ SLI4_CQE_CODE_RQ_ASYNC,
+ SLI4_CQE_CODE_XRI_ABORTED,
+ SLI4_CQE_CODE_RQ_COALESCING,
+ SLI4_CQE_CODE_RQ_CONSUMPTION,
+ SLI4_CQE_CODE_MEASUREMENT_REPORTING,
+ SLI4_CQE_CODE_RQ_ASYNC_V1,
+ SLI4_CQE_CODE_RQ_COALESCING_V1,
+ SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD,
+ SLI4_CQE_CODE_OPTIMIZED_WRITE_DATA,
+};
+
+#define SLI4_WQ_CREATE_MAX_PAGES 8
+
+struct sli4_rqst_wq_create {
+ struct sli4_rqst_hdr hdr;
+ __le16 num_pages;
+ __le16 cq_id;
+ u8 page_size;
+ u8 wqe_size_byte;
+ __le16 wqe_count;
+ __le32 rsvd;
+ struct sli4_dmaaddr page_phys_addr[SLI4_WQ_CREATE_MAX_PAGES];
+};
+
+struct sli4_rsp_wq_create {
+ struct sli4_rsp_cmn_create_queue rsp;
+};
+
+enum sli4_link_attention_flags {
+ SLI4_LNK_ATTN_TYPE_LINK_UP = 0x01,
+ SLI4_LNK_ATTN_TYPE_LINK_DOWN = 0x02,
+ SLI4_LNK_ATTN_TYPE_NO_HARD_ALPA = 0x03,
+
+ SLI4_LNK_ATTN_P2P = 0x01,
+ SLI4_LNK_ATTN_FC_AL = 0x02,
+ SLI4_LNK_ATTN_INTERNAL_LOOPBACK = 0x03,
+ SLI4_LNK_ATTN_SERDES_LOOPBACK = 0x04,
+};
+
+struct sli4_link_attention {
+ u8 link_number;
+ u8 attn_type;
+ u8 topology;
+ u8 port_speed;
+ u8 port_fault;
+ u8 shared_link_status;
+ __le16 logical_link_speed;
+ __le32 event_tag;
+ u8 rsvd12;
+ u8 event_code;
+ u8 event_type;
+ u8 flags;
+};
+
+enum sli4_link_event_type {
+ SLI4_EVENT_LINK_ATTENTION = 0x01,
+ SLI4_EVENT_SHARED_LINK_ATTENTION = 0x02,
+};
+
+enum sli4_wcqe_flags {
+ SLI4_WCQE_XB = 0x10,
+ SLI4_WCQE_QX = 0x80,
+};
+
+struct sli4_fc_wcqe {
+ u8 hw_status;
+ u8 status;
+ __le16 request_tag;
+ __le32 wqe_specific_1;
+ __le32 wqe_specific_2;
+ u8 rsvd12;
+ u8 qx_byte;
+ u8 code;
+ u8 flags;
+};
+
+/* FC WQ consumed CQ queue entry */
+struct sli4_fc_wqec {
+ __le32 rsvd0;
+ __le32 rsvd1;
+ __le16 wqe_index;
+ __le16 wq_id;
+ __le16 rsvd12;
+ u8 code;
+ u8 vld_byte;
+};
+
+/* FC Completion Status Codes. */
+enum sli4_wcqe_status {
+ SLI4_FC_WCQE_STATUS_SUCCESS,
+ SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE,
+ SLI4_FC_WCQE_STATUS_REMOTE_STOP,
+ SLI4_FC_WCQE_STATUS_LOCAL_REJECT,
+ SLI4_FC_WCQE_STATUS_NPORT_RJT,
+ SLI4_FC_WCQE_STATUS_FABRIC_RJT,
+ SLI4_FC_WCQE_STATUS_NPORT_BSY,
+ SLI4_FC_WCQE_STATUS_FABRIC_BSY,
+ SLI4_FC_WCQE_STATUS_RSVD,
+ SLI4_FC_WCQE_STATUS_LS_RJT,
+ SLI4_FC_WCQE_STATUS_RX_BUF_OVERRUN,
+ SLI4_FC_WCQE_STATUS_CMD_REJECT,
+ SLI4_FC_WCQE_STATUS_FCP_TGT_LENCHECK,
+ SLI4_FC_WCQE_STATUS_RSVD1,
+ SLI4_FC_WCQE_STATUS_ELS_CMPLT_NO_AUTOREG,
+ SLI4_FC_WCQE_STATUS_RSVD2,
+ SLI4_FC_WCQE_STATUS_RQ_SUCCESS,
+ SLI4_FC_WCQE_STATUS_RQ_BUF_LEN_EXCEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_BUF_NEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_FRM_DISC,
+ SLI4_FC_WCQE_STATUS_RQ_DMA_FAILURE,
+ SLI4_FC_WCQE_STATUS_FCP_RSP_TRUNCATE,
+ SLI4_FC_WCQE_STATUS_DI_ERROR,
+ SLI4_FC_WCQE_STATUS_BA_RJT,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_NEEDED,
+ SLI4_FC_WCQE_STATUS_RQ_INSUFF_XRI_DISC,
+ SLI4_FC_WCQE_STATUS_RX_ERROR_DETECT,
+ SLI4_FC_WCQE_STATUS_RX_ABORT_REQUEST,
+
+ /* driver generated status codes */
+ SLI4_FC_WCQE_STATUS_DISPATCH_ERROR = 0xfd,
+ SLI4_FC_WCQE_STATUS_SHUTDOWN = 0xfe,
+ SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT = 0xff,
+};
+
+/* DI_ERROR Extended Status */
+enum sli4_fc_di_error_status {
+ SLI4_FC_DI_ERROR_GE = 1 << 0,
+ SLI4_FC_DI_ERROR_AE = 1 << 1,
+ SLI4_FC_DI_ERROR_RE = 1 << 2,
+ SLI4_FC_DI_ERROR_TDPV = 1 << 3,
+ SLI4_FC_DI_ERROR_UDB = 1 << 4,
+ SLI4_FC_DI_ERROR_EDIR = 1 << 5,
+};
+
+/* WQE DIF field contents */
+enum sli4_dif_fields {
+ SLI4_DIF_DISABLED,
+ SLI4_DIF_PASS_THROUGH,
+ SLI4_DIF_STRIP,
+ SLI4_DIF_INSERT,
+};
+
+/* Work Queue Entry (WQE) types */
+enum sli4_wqe_types {
+ SLI4_WQE_ABORT = 0x0f,
+ SLI4_WQE_ELS_REQUEST64 = 0x8a,
+ SLI4_WQE_FCP_IBIDIR64 = 0xac,
+ SLI4_WQE_FCP_IREAD64 = 0x9a,
+ SLI4_WQE_FCP_IWRITE64 = 0x98,
+ SLI4_WQE_FCP_ICMND64 = 0x9c,
+ SLI4_WQE_FCP_TRECEIVE64 = 0xa1,
+ SLI4_WQE_FCP_CONT_TRECEIVE64 = 0xe5,
+ SLI4_WQE_FCP_TRSP64 = 0xa3,
+ SLI4_WQE_FCP_TSEND64 = 0x9f,
+ SLI4_WQE_GEN_REQUEST64 = 0xc2,
+ SLI4_WQE_SEND_FRAME = 0xe1,
+ SLI4_WQE_XMIT_BCAST64 = 0x84,
+ SLI4_WQE_XMIT_BLS_RSP = 0x97,
+ SLI4_WQE_ELS_RSP64 = 0x95,
+ SLI4_WQE_XMIT_SEQUENCE64 = 0x82,
+ SLI4_WQE_REQUEUE_XRI = 0x93,
+};
+
+/* WQE command types */
+enum sli4_wqe_cmds {
+ SLI4_CMD_FCP_IREAD64_WQE = 0x00,
+ SLI4_CMD_FCP_ICMND64_WQE = 0x00,
+ SLI4_CMD_FCP_IWRITE64_WQE = 0x01,
+ SLI4_CMD_FCP_TRECEIVE64_WQE = 0x02,
+ SLI4_CMD_FCP_TRSP64_WQE = 0x03,
+ SLI4_CMD_FCP_TSEND64_WQE = 0x07,
+ SLI4_CMD_GEN_REQUEST64_WQE = 0x08,
+ SLI4_CMD_XMIT_BCAST64_WQE = 0x08,
+ SLI4_CMD_XMIT_BLS_RSP64_WQE = 0x08,
+ SLI4_CMD_ABORT_WQE = 0x08,
+ SLI4_CMD_XMIT_SEQUENCE64_WQE = 0x08,
+ SLI4_CMD_REQUEUE_XRI_WQE = 0x0a,
+ SLI4_CMD_SEND_FRAME_WQE = 0x0a,
+};
+
+#define SLI4_WQE_SIZE 0x05
+#define SLI4_WQE_EXT_SIZE 0x06
+
+#define SLI4_WQE_BYTES (16 * sizeof(u32))
+#define SLI4_WQE_EXT_BYTES (32 * sizeof(u32))
+
+/* Mask for ccp (CS_CTL) */
+#define SLI4_MASK_CCP 0xfe
+
+/* Generic WQE */
+enum sli4_gen_wqe_flags {
+ SLI4_GEN_WQE_EBDECNT = 0xf,
+ SLI4_GEN_WQE_LEN_LOC = 0x3 << 7,
+ SLI4_GEN_WQE_QOSD = 1 << 9,
+ SLI4_GEN_WQE_XBL = 1 << 11,
+ SLI4_GEN_WQE_HLM = 1 << 12,
+ SLI4_GEN_WQE_IOD = 1 << 13,
+ SLI4_GEN_WQE_DBDE = 1 << 14,
+ SLI4_GEN_WQE_WQES = 1 << 15,
+
+ SLI4_GEN_WQE_PRI = 0x7,
+ SLI4_GEN_WQE_PV = 1 << 3,
+ SLI4_GEN_WQE_EAT = 1 << 4,
+ SLI4_GEN_WQE_XC = 1 << 5,
+ SLI4_GEN_WQE_CCPE = 1 << 7,
+
+ SLI4_GEN_WQE_CMDTYPE = 0xf,
+ SLI4_GEN_WQE_WQEC = 1 << 7,
+};
+
+struct sli4_generic_wqe {
+ __le32 cmd_spec0_5[6];
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 dw10w0_flags;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_wqec_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+};
+
+/* WQE used to abort exchanges. */
+enum sli4_abort_wqe_flags {
+ SLI4_ABRT_WQE_IR = 0x02,
+
+ SLI4_ABRT_WQE_EBDECNT = 0xf,
+ SLI4_ABRT_WQE_LEN_LOC = 0x3 << 7,
+ SLI4_ABRT_WQE_QOSD = 1 << 9,
+ SLI4_ABRT_WQE_XBL = 1 << 11,
+ SLI4_ABRT_WQE_IOD = 1 << 13,
+ SLI4_ABRT_WQE_DBDE = 1 << 14,
+ SLI4_ABRT_WQE_WQES = 1 << 15,
+
+ SLI4_ABRT_WQE_PRI = 0x7,
+ SLI4_ABRT_WQE_PV = 1 << 3,
+ SLI4_ABRT_WQE_EAT = 1 << 4,
+ SLI4_ABRT_WQE_XC = 1 << 5,
+ SLI4_ABRT_WQE_CCPE = 1 << 7,
+
+ SLI4_ABRT_WQE_CMDTYPE = 0xf,
+ SLI4_ABRT_WQE_WQEC = 1 << 7,
+};
+
+struct sli4_abort_wqe {
+ __le32 rsvd0;
+ __le32 rsvd4;
+ __le32 ext_t_tag;
+ u8 ia_ir_byte;
+ u8 criteria;
+ __le16 rsvd10;
+ __le32 ext_t_mask;
+ __le32 t_mask;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 t_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 dw10w0_flags;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_wqec_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+};
+
+enum sli4_abort_criteria {
+ SLI4_ABORT_CRITERIA_XRI_TAG = 0x01,
+ SLI4_ABORT_CRITERIA_ABORT_TAG,
+ SLI4_ABORT_CRITERIA_REQUEST_TAG,
+ SLI4_ABORT_CRITERIA_EXT_ABORT_TAG,
+};
+
+enum sli4_abort_type {
+ SLI4_ABORT_XRI,
+ SLI4_ABORT_ABORT_ID,
+ SLI4_ABORT_REQUEST_ID,
+ SLI4_ABORT_MAX, /* must be last */
+};
+
+/* WQE used to create an ELS request. */
+enum sli4_els_req_wqe_flags {
+ SLI4_REQ_WQE_QOSD = 0x2,
+ SLI4_REQ_WQE_DBDE = 0x40,
+ SLI4_REQ_WQE_XBL = 0x8,
+ SLI4_REQ_WQE_XC = 0x20,
+ SLI4_REQ_WQE_IOD = 0x20,
+ SLI4_REQ_WQE_HLM = 0x10,
+ SLI4_REQ_WQE_CCPE = 0x80,
+ SLI4_REQ_WQE_EAT = 0x10,
+ SLI4_REQ_WQE_WQES = 0x80,
+ SLI4_REQ_WQE_PU_SHFT = 4,
+ SLI4_REQ_WQE_CT_SHFT = 2,
+ SLI4_REQ_WQE_CT = 0xc,
+ SLI4_REQ_WQE_ELSID_SHFT = 4,
+ SLI4_REQ_WQE_SP_SHFT = 24,
+ SLI4_REQ_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_REQ_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_els_request64_wqe {
+ struct sli4_bde els_request_payload;
+ __le32 els_request_payload_length;
+ __le32 sid_sp_dword;
+ __le32 remote_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 temporary_rpi;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmdtype_elsid_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ struct sli4_bde els_response_payload_bde;
+ __le32 max_response_payload_length;
+};
+
+/* WQE used to create an FCP initiator no data command. */
+enum sli4_icmd_wqe_flags {
+ SLI4_ICMD_WQE_DBDE = 0x40,
+ SLI4_ICMD_WQE_XBL = 0x8,
+ SLI4_ICMD_WQE_XC = 0x20,
+ SLI4_ICMD_WQE_IOD = 0x20,
+ SLI4_ICMD_WQE_HLM = 0x10,
+ SLI4_ICMD_WQE_CCPE = 0x80,
+ SLI4_ICMD_WQE_EAT = 0x10,
+ SLI4_ICMD_WQE_APPID = 0x10,
+ SLI4_ICMD_WQE_WQES = 0x80,
+ SLI4_ICMD_WQE_PU_SHFT = 4,
+ SLI4_ICMD_WQE_CT_SHFT = 2,
+ SLI4_ICMD_WQE_BS_SHFT = 4,
+ SLI4_ICMD_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_ICMD_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_icmnd64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+ __le32 rsvd12;
+ __le32 remote_n_port_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create an FCP initiator read. */
+enum sli4_ir_wqe_flags {
+ SLI4_IR_WQE_DBDE = 0x40,
+ SLI4_IR_WQE_XBL = 0x8,
+ SLI4_IR_WQE_XC = 0x20,
+ SLI4_IR_WQE_IOD = 0x20,
+ SLI4_IR_WQE_HLM = 0x10,
+ SLI4_IR_WQE_CCPE = 0x80,
+ SLI4_IR_WQE_EAT = 0x10,
+ SLI4_IR_WQE_APPID = 0x10,
+ SLI4_IR_WQE_WQES = 0x80,
+ SLI4_IR_WQE_PU_SHFT = 4,
+ SLI4_IR_WQE_CT_SHFT = 2,
+ SLI4_IR_WQE_BS_SHFT = 4,
+ SLI4_IR_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_IR_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_iread64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+
+ __le32 total_transfer_length;
+
+ __le32 remote_n_port_id_dword;
+
+ __le16 xri_tag;
+ __le16 context_tag;
+
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+
+ __le32 abort_tag;
+
+ __le16 request_tag;
+ __le16 rsvd34;
+
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+
+ __le32 rsvd44;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create an FCP initiator write. */
+enum sli4_iwr_wqe_flags {
+ SLI4_IWR_WQE_DBDE = 0x40,
+ SLI4_IWR_WQE_XBL = 0x8,
+ SLI4_IWR_WQE_XC = 0x20,
+ SLI4_IWR_WQE_IOD = 0x20,
+ SLI4_IWR_WQE_HLM = 0x10,
+ SLI4_IWR_WQE_DNRX = 0x10,
+ SLI4_IWR_WQE_CCPE = 0x80,
+ SLI4_IWR_WQE_EAT = 0x10,
+ SLI4_IWR_WQE_APPID = 0x10,
+ SLI4_IWR_WQE_WQES = 0x80,
+ SLI4_IWR_WQE_PU_SHFT = 4,
+ SLI4_IWR_WQE_CT_SHFT = 2,
+ SLI4_IWR_WQE_BS_SHFT = 4,
+ SLI4_IWR_WQE_LEN_LOC_BIT1 = 0x80,
+ SLI4_IWR_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_iwrite64_wqe {
+ struct sli4_bde bde;
+ __le16 payload_offset_length;
+ __le16 fcp_cmd_buffer_length;
+ __le16 total_transfer_length;
+ __le16 initial_transfer_length;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 len_loc1_byte;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 remote_n_port_id_dword;
+ struct sli4_bde first_data_bde;
+};
+
+struct sli4_fcp_128byte_wqe {
+ u32 dw[32];
+};
+
+/* WQE used to create an FCP target receive */
+enum sli4_trcv_wqe_flags {
+ SLI4_TRCV_WQE_DBDE = 0x40,
+ SLI4_TRCV_WQE_XBL = 0x8,
+ SLI4_TRCV_WQE_AR = 0x8,
+ SLI4_TRCV_WQE_XC = 0x20,
+ SLI4_TRCV_WQE_IOD = 0x20,
+ SLI4_TRCV_WQE_HLM = 0x10,
+ SLI4_TRCV_WQE_DNRX = 0x10,
+ SLI4_TRCV_WQE_CCPE = 0x80,
+ SLI4_TRCV_WQE_EAT = 0x10,
+ SLI4_TRCV_WQE_APPID = 0x10,
+ SLI4_TRCV_WQE_WQES = 0x80,
+ SLI4_TRCV_WQE_PU_SHFT = 4,
+ SLI4_TRCV_WQE_CT_SHFT = 2,
+ SLI4_TRCV_WQE_BS_SHFT = 4,
+ SLI4_TRCV_WQE_LEN_LOC_BIT2 = 0x1,
+};
+
+struct sli4_fcp_treceive64_wqe {
+ struct sli4_bde bde;
+ __le32 payload_offset_length;
+ __le32 relative_offset;
+ union {
+ __le16 sec_xri_tag;
+ __le16 rsvd;
+ __le32 dword;
+ } dword5;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dif_ct_bs_byte;
+ u8 command;
+ u8 class_ar_pu_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 lloc1_appid;
+ u8 qosd_xbl_hlm_iod_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 fcp_data_receive_length;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create an FCP target response */
+enum sli4_trsp_wqe_flags {
+ SLI4_TRSP_WQE_AG = 0x8,
+ SLI4_TRSP_WQE_DBDE = 0x40,
+ SLI4_TRSP_WQE_XBL = 0x8,
+ SLI4_TRSP_WQE_XC = 0x20,
+ SLI4_TRSP_WQE_HLM = 0x10,
+ SLI4_TRSP_WQE_DNRX = 0x10,
+ SLI4_TRSP_WQE_CCPE = 0x80,
+ SLI4_TRSP_WQE_EAT = 0x10,
+ SLI4_TRSP_WQE_APPID = 0x10,
+ SLI4_TRSP_WQE_WQES = 0x80,
+};
+
+struct sli4_fcp_trsp64_wqe {
+ struct sli4_bde bde;
+ __le32 fcp_response_length;
+ __le32 rsvd12;
+ __le32 dword5;
+ __le16 xri_tag;
+ __le16 rpi;
+ u8 ct_dnrx_byte;
+ u8 command;
+ u8 class_ag_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 lloc1_appid;
+ u8 qosd_xbl_hlm_dbde_wqes;
+ u8 eat_xc_ccpe;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create an FCP target send (DATA IN). */
+enum sli4_tsend_wqe_flags {
+ SLI4_TSEND_WQE_XBL = 0x8,
+ SLI4_TSEND_WQE_DBDE = 0x40,
+ SLI4_TSEND_WQE_IOD = 0x20,
+ SLI4_TSEND_WQE_QOSD = 0x2,
+ SLI4_TSEND_WQE_HLM = 0x10,
+ SLI4_TSEND_WQE_PU_SHFT = 4,
+ SLI4_TSEND_WQE_AR = 0x8,
+ SLI4_TSEND_CT_SHFT = 2,
+ SLI4_TSEND_BS_SHFT = 4,
+ SLI4_TSEND_LEN_LOC_BIT2 = 0x1,
+ SLI4_TSEND_CCPE = 0x80,
+ SLI4_TSEND_APPID_VALID = 0x20,
+ SLI4_TSEND_WQES = 0x80,
+ SLI4_TSEND_XC = 0x20,
+ SLI4_TSEND_EAT = 0x10,
+};
+
+struct sli4_fcp_tsend64_wqe {
+ struct sli4_bde bde;
+ __le32 payload_offset_length;
+ __le32 relative_offset;
+ __le32 dword5;
+ __le16 xri_tag;
+ __le16 rpi;
+ u8 ct_byte;
+ u8 command;
+ u8 class_pu_ar_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ u8 dw10byte0;
+ u8 ll_qd_xbl_hlm_iod_dbde;
+ u8 dw10byte2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le32 fcp_data_transmit_length;
+ struct sli4_bde first_data_bde;
+};
+
+/* WQE used to create a general request. */
+enum sli4_gen_req_wqe_flags {
+ SLI4_GEN_REQ64_WQE_XBL = 0x8,
+ SLI4_GEN_REQ64_WQE_DBDE = 0x40,
+ SLI4_GEN_REQ64_WQE_IOD = 0x20,
+ SLI4_GEN_REQ64_WQE_QOSD = 0x2,
+ SLI4_GEN_REQ64_WQE_HLM = 0x10,
+ SLI4_GEN_REQ64_CT_SHFT = 2,
+};
+
+struct sli4_gen_request64_wqe {
+ struct sli4_bde bde;
+ __le32 request_payload_length;
+ __le32 relative_offset;
+ u8 rsvd17;
+ u8 df_ctl;
+ u8 type;
+ u8 r_ctl;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd34;
+ u8 dw10flags0;
+ u8 dw10flags1;
+ u8 dw10flags2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 remote_n_port_id_dword;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 max_response_payload_length;
+};
+
+/* WQE used to create a send frame request */
+enum sli4_sf_wqe_flags {
+ SLI4_SF_WQE_DBDE = 0x40,
+ SLI4_SF_PU = 0x30,
+ SLI4_SF_CT = 0xc,
+ SLI4_SF_QOSD = 0x2,
+ SLI4_SF_LEN_LOC_BIT1 = 0x80,
+ SLI4_SF_LEN_LOC_BIT2 = 0x1,
+ SLI4_SF_XC = 0x20,
+ SLI4_SF_XBL = 0x8,
+};
+
+struct sli4_send_frame_wqe {
+ struct sli4_bde bde;
+ __le32 frame_length;
+ __le32 fc_header_0_1[2];
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 dw7flags0;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ u8 eof;
+ u8 sof;
+ u8 dw10flags0;
+ u8 dw10flags1;
+ u8 dw10flags2;
+ u8 ccp;
+ u8 cmd_type_byte;
+ u8 rsvd41;
+ __le16 cq_id;
+ __le32 fc_header_2_5[4];
+};
+
+/* WQE used to create a transmit sequence */
+enum sli4_seq_wqe_flags {
+ SLI4_SEQ_WQE_DBDE = 0x4000,
+ SLI4_SEQ_WQE_XBL = 0x800,
+ SLI4_SEQ_WQE_SI = 0x4,
+ SLI4_SEQ_WQE_FT = 0x8,
+ SLI4_SEQ_WQE_XO = 0x40,
+ SLI4_SEQ_WQE_LS = 0x80,
+ SLI4_SEQ_WQE_DIF = 0x3,
+ SLI4_SEQ_WQE_BS = 0x70,
+ SLI4_SEQ_WQE_PU = 0x30,
+ SLI4_SEQ_WQE_HLM = 0x1000,
+ SLI4_SEQ_WQE_IOD_SHIFT = 13,
+ SLI4_SEQ_WQE_CT_SHIFT = 2,
+ SLI4_SEQ_WQE_LEN_LOC_SHIFT = 7,
+};
+
+struct sli4_xmit_sequence64_wqe {
+ struct sli4_bde bde;
+ __le32 remote_n_port_id_dword;
+ __le32 relative_offset;
+ u8 dw5flags0;
+ u8 df_ctl;
+ u8 type;
+ u8 r_ctl;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dw7flags0;
+ u8 command;
+ u8 dw7flags1;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 remote_xid;
+ __le16 dw10w0;
+ u8 dw10flags0;
+ u8 ccp;
+ u8 cmd_type_wqec_byte;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le32 sequence_payload_len;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/*
+ * WQE used to unblock the specified XRI and release
+ * it to the SLI Port's free pool.
+ */
+enum sli4_requeue_wqe_flags {
+ SLI4_REQU_XRI_WQE_XC = 0x20,
+ SLI4_REQU_XRI_WQE_QOSD = 0x2,
+};
+
+struct sli4_requeue_xri_wqe {
+ __le32 rsvd0;
+ __le32 rsvd4;
+ __le32 rsvd8;
+ __le32 rsvd12;
+ __le32 rsvd16;
+ __le32 rsvd20;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 rsvd32;
+ __le16 request_tag;
+ __le16 rsvd34;
+ __le16 flags0;
+ __le16 flags1;
+ __le16 flags2;
+ u8 ccp;
+ u8 cmd_type_wqec_byte;
+ u8 rsvd42;
+ __le16 cq_id;
+ __le32 rsvd44;
+ __le32 rsvd48;
+ __le32 rsvd52;
+ __le32 rsvd56;
+};
+
+/* WQE used to create a BLS response */
+enum sli4_bls_rsp_wqe_flags {
+ SLI4_BLS_RSP_RID = 0xffffff,
+ SLI4_BLS_RSP_WQE_AR = 0x40000000,
+ SLI4_BLS_RSP_WQE_CT_SHFT = 2,
+ SLI4_BLS_RSP_WQE_QOSD = 0x2,
+ SLI4_BLS_RSP_WQE_HLM = 0x10,
+};
+
+struct sli4_xmit_bls_rsp_wqe {
+ __le32 payload_word0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
+ __le32 rsvd12;
+ __le32 local_n_port_id_dword;
+ __le32 remote_id_dword;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 dw8flags0;
+ u8 command;
+ u8 dw8flags1;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 rsvd38;
+ u8 dw11flags0;
+ u8 dw11flags1;
+ u8 dw11flags2;
+ u8 ccp;
+ u8 dw12flags0;
+ u8 rsvd45;
+ __le16 cq_id;
+ __le16 temporary_rpi;
+ u8 rsvd50;
+ u8 rsvd51;
+ __le32 rsvd52;
+ __le32 rsvd56;
+ __le32 rsvd60;
+};
+
+enum sli_bls_type {
+ SLI4_SLI_BLS_ACC,
+ SLI4_SLI_BLS_RJT,
+ SLI4_SLI_BLS_MAX
+};
+
+struct sli_bls_payload {
+ enum sli_bls_type type;
+ __le16 ox_id;
+ __le16 rx_id;
+ union {
+ struct {
+ u8 seq_id_validity;
+ u8 seq_id_last;
+ u8 rsvd2;
+ u8 rsvd3;
+ u16 ox_id;
+ u16 rx_id;
+ __le16 low_seq_cnt;
+ __le16 high_seq_cnt;
+ } acc;
+ struct {
+ u8 vendor_unique;
+ u8 reason_explanation;
+ u8 reason_code;
+ u8 rsvd3;
+ } rjt;
+ } u;
+};
+
+/* WQE used to create an ELS response */
+
+enum sli4_els_rsp_flags {
+ SLI4_ELS_SID = 0xffffff,
+ SLI4_ELS_RID = 0xffffff,
+ SLI4_ELS_DBDE = 0x40,
+ SLI4_ELS_XBL = 0x8,
+ SLI4_ELS_IOD = 0x20,
+ SLI4_ELS_QOSD = 0x2,
+ SLI4_ELS_XC = 0x20,
+ SLI4_ELS_CT_OFFSET = 0x2,
+ SLI4_ELS_SP = 0x1000000,
+ SLI4_ELS_HLM = 0x10,
+};
+
+struct sli4_xmit_els_rsp64_wqe {
+ struct sli4_bde els_response_payload;
+ __le32 els_response_payload_length;
+ __le32 sid_dw;
+ __le32 rid_dw;
+ __le16 xri_tag;
+ __le16 context_tag;
+ u8 ct_byte;
+ u8 command;
+ u8 class_byte;
+ u8 timer;
+ __le32 abort_tag;
+ __le16 request_tag;
+ __le16 ox_id;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ u8 flags4;
+ u8 cmd_type_wqec;
+ u8 rsvd34;
+ __le16 cq_id;
+ __le16 temporary_rpi;
+ __le16 rsvd38;
+ u32 rsvd40;
+ u32 rsvd44;
+ u32 rsvd48;
+};
+
+/* Local Reject Reason Codes */
+enum sli4_fc_local_rej_codes {
+ SLI4_FC_LOCAL_REJECT_UNKNOWN,
+ SLI4_FC_LOCAL_REJECT_MISSING_CONTINUE,
+ SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT,
+ SLI4_FC_LOCAL_REJECT_INTERNAL_ERROR,
+ SLI4_FC_LOCAL_REJECT_INVALID_RPI,
+ SLI4_FC_LOCAL_REJECT_NO_XRI,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_COMMAND,
+ SLI4_FC_LOCAL_REJECT_XCHG_DROPPED,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_FIELD,
+ SLI4_FC_LOCAL_REJECT_RPI_SUSPENDED,
+ SLI4_FC_LOCAL_REJECT_RSVD,
+ SLI4_FC_LOCAL_REJECT_RSVD1,
+ SLI4_FC_LOCAL_REJECT_NO_ABORT_MATCH,
+ SLI4_FC_LOCAL_REJECT_TX_DMA_FAILED,
+ SLI4_FC_LOCAL_REJECT_RX_DMA_FAILED,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_FRAME,
+ SLI4_FC_LOCAL_REJECT_RSVD2,
+ SLI4_FC_LOCAL_REJECT_NO_RESOURCES, /* 0x11 */
+ SLI4_FC_LOCAL_REJECT_FCP_CONF_FAILURE,
+ SLI4_FC_LOCAL_REJECT_ILLEGAL_LENGTH,
+ SLI4_FC_LOCAL_REJECT_UNSUPPORTED_FEATURE,
+ SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS,
+ SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED,
+ SLI4_FC_LOCAL_REJECT_RCV_BUFFER_TIMEOUT,
+ SLI4_FC_LOCAL_REJECT_LOOP_OPEN_FAILURE,
+ SLI4_FC_LOCAL_REJECT_RSVD3,
+ SLI4_FC_LOCAL_REJECT_LINK_DOWN,
+ SLI4_FC_LOCAL_REJECT_CORRUPTED_DATA,
+ SLI4_FC_LOCAL_REJECT_CORRUPTED_RPI,
+ SLI4_FC_LOCAL_REJECT_OUTOFORDER_DATA,
+ SLI4_FC_LOCAL_REJECT_OUTOFORDER_ACK,
+ SLI4_FC_LOCAL_REJECT_DUP_FRAME,
+ SLI4_FC_LOCAL_REJECT_LINK_CONTROL_FRAME, /* 0x20 */
+ SLI4_FC_LOCAL_REJECT_BAD_HOST_ADDRESS,
+ SLI4_FC_LOCAL_REJECT_RSVD4,
+ SLI4_FC_LOCAL_REJECT_MISSING_HDR_BUFFER,
+ SLI4_FC_LOCAL_REJECT_MSEQ_CHAIN_CORRUPTED,
+ SLI4_FC_LOCAL_REJECT_ABORTMULT_REQUESTED,
+ SLI4_FC_LOCAL_REJECT_BUFFER_SHORTAGE = 0x28,
+ SLI4_FC_LOCAL_REJECT_RCV_XRIBUF_WAITING,
+ SLI4_FC_LOCAL_REJECT_INVALID_VPI = 0x2e,
+ SLI4_FC_LOCAL_REJECT_NO_FPORT_DETECTED,
+ SLI4_FC_LOCAL_REJECT_MISSING_XRIBUF,
+ SLI4_FC_LOCAL_REJECT_RSVD5,
+ SLI4_FC_LOCAL_REJECT_INVALID_XRI,
+ SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET = 0x40,
+ SLI4_FC_LOCAL_REJECT_MISSING_RELOFFSET,
+ SLI4_FC_LOCAL_REJECT_INSUFF_BUFFERSPACE,
+ SLI4_FC_LOCAL_REJECT_MISSING_SI,
+ SLI4_FC_LOCAL_REJECT_MISSING_ES,
+ SLI4_FC_LOCAL_REJECT_INCOMPLETE_XFER,
+ SLI4_FC_LOCAL_REJECT_SLER_FAILURE,
+ SLI4_FC_LOCAL_REJECT_SLER_CMD_RCV_FAILURE,
+ SLI4_FC_LOCAL_REJECT_SLER_REC_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_REC_SRR_RETRY_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_SRR_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_RSVD6,
+ SLI4_FC_LOCAL_REJECT_SLER_RRQ_RJT_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_RRQ_RETRY_ERR,
+ SLI4_FC_LOCAL_REJECT_SLER_ABTS_ERR,
+};
+
+enum sli4_async_rcqe_flags {
+ SLI4_RACQE_RQ_EL_INDX = 0xfff,
+ SLI4_RACQE_FCFI = 0x3f,
+ SLI4_RACQE_HDPL = 0x3f,
+ SLI4_RACQE_RQ_ID = 0xffc0,
+};
+
+struct sli4_fc_async_rcqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ __le32 rsvd4;
+ __le16 fcfi_rq_id_word;
+ __le16 data_placement_length;
+ u8 sof_byte;
+ u8 eof_byte;
+ u8 code;
+ u8 hdpl_byte;
+};
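+
+/*
+ * Sketch only: pulling the RQ id and element index out of a v0 async RCQE.
+ * The >> 6 shift is an assumption read off the 0xffc0 RQ_ID mask (the low
+ * six bits of fcfi_rq_id_word carry the FCFI).
+ */
+static inline void
+sli_rcqe_rq_id_and_index(struct sli4_fc_async_rcqe *rcqe,
+ u16 *rq_id, u16 *index)
+{
+ *rq_id = (le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_RQ_ID) >> 6;
+ *index = le16_to_cpu(rcqe->rq_elmt_indx_word) & SLI4_RACQE_RQ_EL_INDX;
+}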
+
+struct sli4_fc_async_rcqe_v1 {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ u8 fcfi_byte;
+ u8 rsvd5;
+ __le16 rsvd6;
+ __le16 rq_id;
+ __le16 data_placement_length;
+ u8 sof_byte;
+ u8 eof_byte;
+ u8 code;
+ u8 hdpl_byte;
+};
+
+enum sli4_fc_async_rq_status {
+ SLI4_FC_ASYNC_RQ_SUCCESS = 0x10,
+ SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED,
+ SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED,
+ SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC,
+ SLI4_FC_ASYNC_RQ_DMA_FAILURE,
+};
+
+#define SLI4_RCQE_RQ_EL_INDX 0xfff
+
+struct sli4_fc_coalescing_rcqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rq_elmt_indx_word;
+ __le32 rsvd4;
+ __le16 rq_id;
+ __le16 seq_placement_length;
+ __le16 rsvd14;
+ u8 code;
+ u8 vld_byte;
+};
+
+#define SLI4_FC_COALESCE_RQ_SUCCESS 0x10
+#define SLI4_FC_COALESCE_RQ_INSUFF_XRI_NEEDED 0x18
+
+enum sli4_optimized_write_cmd_cqe_flags {
+ SLI4_OCQE_RQ_EL_INDX = 0x7f, /* DW0 bits 16:30 */
+ SLI4_OCQE_FCFI = 0x3f, /* DW1 bits 0:6 */
+ SLI4_OCQE_OOX = 1 << 6, /* DW1 bit 15 */
+ SLI4_OCQE_AGXR = 1 << 7, /* DW1 bit 16 */
+ SLI4_OCQE_HDPL = 0x3f, /* DW3 bits 24:29 */
+};
+
+struct sli4_fc_optimized_write_cmd_cqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 w1;
+ u8 flags0;
+ u8 flags1;
+ __le16 xri;
+ __le16 rq_id;
+ __le16 data_placement_length;
+ __le16 rpi;
+ u8 code;
+ u8 hdpl_vld;
+};
+
+#define SLI4_OCQE_XB 0x10
+
+struct sli4_fc_optimized_write_data_cqe {
+ u8 hw_status;
+ u8 status;
+ __le16 xri;
+ __le32 total_data_placed;
+ __le32 extended_status;
+ __le16 rsvd12;
+ u8 code;
+ u8 flags;
+};
+
+struct sli4_fc_xri_aborted_cqe {
+ u8 rsvd0;
+ u8 status;
+ __le16 rsvd2;
+ __le32 extended_status;
+ __le16 xri;
+ __le16 remote_xid;
+ __le16 rsvd12;
+ u8 code;
+ u8 flags;
+};
+
+enum sli4_generic_ctx {
+ SLI4_GENERIC_CONTEXT_RPI,
+ SLI4_GENERIC_CONTEXT_VPI,
+ SLI4_GENERIC_CONTEXT_VFI,
+ SLI4_GENERIC_CONTEXT_FCFI,
+};
+
+#define SLI4_GENERIC_CLASS_CLASS_2 0x1
+#define SLI4_GENERIC_CLASS_CLASS_3 0x2
+
+#define SLI4_ELS_REQUEST64_DIR_WRITE 0x0
+#define SLI4_ELS_REQUEST64_DIR_READ 0x1
+
+enum sli4_els_request {
+ SLI4_ELS_REQUEST64_OTHER,
+ SLI4_ELS_REQUEST64_LOGO,
+ SLI4_ELS_REQUEST64_FDISC,
+ SLI4_ELS_REQUEST64_FLOGIN,
+ SLI4_ELS_REQUEST64_PLOGI,
+};
+
+enum sli4_els_cmd_type {
+ SLI4_ELS_REQUEST64_CMD_GEN = 0x08,
+ SLI4_ELS_REQUEST64_CMD_NON_FABRIC = 0x0c,
+ SLI4_ELS_REQUEST64_CMD_FABRIC = 0x0d,
+};
+
+#define SLI_PAGE_SIZE SZ_4K
+
+#define SLI4_BMBX_TIMEOUT_MSEC 30000
+#define SLI4_FW_READY_TIMEOUT_MSEC 30000
+
+#define SLI4_BMBX_DELAY_US 1000 /* 1 ms */
+#define SLI4_INIT_PORT_DELAY_US 10000 /* 10 ms */
+
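+/*
+ * Round a byte count up to whole pages; page_size is expected to be a
+ * power of two, since __ffs() is used as a cheap log2.
+ */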
+static inline u32
+sli_page_count(size_t bytes, u32 page_size)
+{
+ if (!page_size)
+ return 0;
+
+ return (bytes + (page_size - 1)) >> __ffs(page_size);
+}
+
+/*************************************************************************
+ * SLI-4 mailbox command formats and definitions
+ */
+
+struct sli4_mbox_command_header {
+ u8 resvd0;
+ u8 command;
+ __le16 status; /* Port writes to indicate success/fail */
+};
+
+enum sli4_mbx_cmd_value {
+ SLI4_MBX_CMD_CONFIG_LINK = 0x07,
+ SLI4_MBX_CMD_DUMP = 0x17,
+ SLI4_MBX_CMD_DOWN_LINK = 0x06,
+ SLI4_MBX_CMD_INIT_LINK = 0x05,
+ SLI4_MBX_CMD_INIT_VFI = 0xa3,
+ SLI4_MBX_CMD_INIT_VPI = 0xa4,
+ SLI4_MBX_CMD_POST_XRI = 0xa7,
+ SLI4_MBX_CMD_RELEASE_XRI = 0xac,
+ SLI4_MBX_CMD_READ_CONFIG = 0x0b,
+ SLI4_MBX_CMD_READ_STATUS = 0x0e,
+ SLI4_MBX_CMD_READ_NVPARMS = 0x02,
+ SLI4_MBX_CMD_READ_REV = 0x11,
+ SLI4_MBX_CMD_READ_LNK_STAT = 0x12,
+ SLI4_MBX_CMD_READ_SPARM64 = 0x8d,
+ SLI4_MBX_CMD_READ_TOPOLOGY = 0x95,
+ SLI4_MBX_CMD_REG_FCFI = 0xa0,
+ SLI4_MBX_CMD_REG_FCFI_MRQ = 0xaf,
+ SLI4_MBX_CMD_REG_RPI = 0x93,
+ SLI4_MBX_CMD_REG_RX_RQ = 0xa6,
+ SLI4_MBX_CMD_REG_VFI = 0x9f,
+ SLI4_MBX_CMD_REG_VPI = 0x96,
+ SLI4_MBX_CMD_RQST_FEATURES = 0x9d,
+ SLI4_MBX_CMD_SLI_CONFIG = 0x9b,
+ SLI4_MBX_CMD_UNREG_FCFI = 0xa2,
+ SLI4_MBX_CMD_UNREG_RPI = 0x14,
+ SLI4_MBX_CMD_UNREG_VFI = 0xa1,
+ SLI4_MBX_CMD_UNREG_VPI = 0x97,
+ SLI4_MBX_CMD_WRITE_NVPARMS = 0x03,
+ SLI4_MBX_CMD_CFG_AUTO_XFER_RDY = 0xad,
+};
+
+enum sli4_mbx_status {
+ SLI4_MBX_STATUS_SUCCESS = 0x0000,
+ SLI4_MBX_STATUS_FAILURE = 0x0001,
+ SLI4_MBX_STATUS_RPI_NOT_REG = 0x1400,
+};
+
+/* CONFIG_LINK - configure link-oriented parameters,
+ * such as default N_Port_ID address and various timers
+ */
+enum sli4_cmd_config_link_flags {
+ SLI4_CFG_LINK_BBSCN = 0xf00,
+ SLI4_CFG_LINK_CSCN = 0x1000,
+};
+
+struct sli4_cmd_config_link {
+ struct sli4_mbox_command_header hdr;
+ u8 maxbbc;
+ u8 rsvd5;
+ u8 rsvd6;
+ u8 rsvd7;
+ u8 alpa;
+ __le16 n_port_id;
+ u8 rsvd11;
+ __le32 rsvd12;
+ __le32 e_d_tov;
+ __le32 lp_tov;
+ __le32 r_a_tov;
+ __le32 r_t_tov;
+ __le32 al_tov;
+ __le32 rsvd36;
+ __le32 bbscn_dword;
+};
+
+#define SLI4_DUMP4_TYPE 0xf
+
+#define SLI4_WKI_TAG_SAT_TEM 0x1040
+
+struct sli4_cmd_dump4 {
+ struct sli4_mbox_command_header hdr;
+ __le32 type_dword;
+ __le16 wki_selection;
+ __le16 rsvd10;
+ __le32 rsvd12;
+ __le32 returned_byte_cnt;
+ __le32 resp_data[59];
+};
+
+/* INIT_LINK - initialize the link for an FC port */
+enum sli4_init_link_flags {
+ SLI4_INIT_LINK_F_LOOPBACK = 1 << 0,
+
+ SLI4_INIT_LINK_F_P2P_ONLY = 1 << 1,
+ SLI4_INIT_LINK_F_FCAL_ONLY = 2 << 1,
+ SLI4_INIT_LINK_F_FCAL_FAIL_OVER = 0 << 1,
+ SLI4_INIT_LINK_F_P2P_FAIL_OVER = 1 << 1,
+
+ SLI4_INIT_LINK_F_UNFAIR = 1 << 6,
+ SLI4_INIT_LINK_F_NO_LIRP = 1 << 7,
+ SLI4_INIT_LINK_F_LOOP_VALID_CHK = 1 << 8,
+ SLI4_INIT_LINK_F_NO_LISA = 1 << 9,
+ SLI4_INIT_LINK_F_FAIL_OVER = 1 << 10,
+ SLI4_INIT_LINK_F_FIXED_SPEED = 1 << 11,
+ SLI4_INIT_LINK_F_PICK_HI_ALPA = 1 << 15,
+};
+
+enum sli4_fc_link_speed {
+ SLI4_LINK_SPEED_1G = 1,
+ SLI4_LINK_SPEED_2G,
+ SLI4_LINK_SPEED_AUTO_1_2,
+ SLI4_LINK_SPEED_4G,
+ SLI4_LINK_SPEED_AUTO_4_1,
+ SLI4_LINK_SPEED_AUTO_4_2,
+ SLI4_LINK_SPEED_AUTO_4_2_1,
+ SLI4_LINK_SPEED_8G,
+ SLI4_LINK_SPEED_AUTO_8_1,
+ SLI4_LINK_SPEED_AUTO_8_2,
+ SLI4_LINK_SPEED_AUTO_8_2_1,
+ SLI4_LINK_SPEED_AUTO_8_4,
+ SLI4_LINK_SPEED_AUTO_8_4_1,
+ SLI4_LINK_SPEED_AUTO_8_4_2,
+ SLI4_LINK_SPEED_10G,
+ SLI4_LINK_SPEED_16G,
+ SLI4_LINK_SPEED_AUTO_16_8_4,
+ SLI4_LINK_SPEED_AUTO_16_8,
+ SLI4_LINK_SPEED_32G,
+ SLI4_LINK_SPEED_AUTO_32_16_8,
+ SLI4_LINK_SPEED_AUTO_32_16,
+ SLI4_LINK_SPEED_64G,
+ SLI4_LINK_SPEED_AUTO_64_32_16,
+ SLI4_LINK_SPEED_AUTO_64_32,
+ SLI4_LINK_SPEED_128G,
+ SLI4_LINK_SPEED_AUTO_128_64_32,
+ SLI4_LINK_SPEED_AUTO_128_64,
+};
+
+struct sli4_cmd_init_link {
+ struct sli4_mbox_command_header hdr;
+ __le32 sel_reset_al_pa_dword;
+ __le32 flags0;
+ __le32 link_speed_sel_code;
+};
+
+/* INIT_VFI - initialize the VFI resource */
+enum sli4_init_vfi_flags {
+ SLI4_INIT_VFI_FLAG_VP = 0x1000,
+ SLI4_INIT_VFI_FLAG_VF = 0x2000,
+ SLI4_INIT_VFI_FLAG_VT = 0x4000,
+ SLI4_INIT_VFI_FLAG_VR = 0x8000,
+
+ SLI4_INIT_VFI_VFID = 0x1fff,
+ SLI4_INIT_VFI_PRI = 0xe000,
+
+ SLI4_INIT_VFI_HOP_COUNT = 0xff000000,
+};
+
+struct sli4_cmd_init_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vfi;
+ __le16 flags0_word;
+ __le16 fcfi;
+ __le16 vpi;
+ __le32 vf_id_pri_dword;
+ __le32 hop_cnt_dword;
+};
+
+/* INIT_VPI - initialize the VPI resource */
+struct sli4_cmd_init_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vpi;
+ __le16 vfi;
+};
+
+/* POST_XRI - post XRI resources to the SLI Port */
+enum sli4_post_xri_flags {
+ SLI4_POST_XRI_COUNT = 0xfff,
+ SLI4_POST_XRI_FLAG_ENX = 0x1000,
+ SLI4_POST_XRI_FLAG_DL = 0x2000,
+ SLI4_POST_XRI_FLAG_DI = 0x4000,
+ SLI4_POST_XRI_FLAG_VAL = 0x8000,
+};
+
+struct sli4_cmd_post_xri {
+ struct sli4_mbox_command_header hdr;
+ __le16 xri_base;
+ __le16 xri_count_flags;
+};
+
+/* RELEASE_XRI - Release XRI resources from the SLI Port */
+enum sli4_release_xri_flags {
+ SLI4_RELEASE_XRI_REL_XRI_CNT = 0x1f,
+ SLI4_RELEASE_XRI_COUNT = 0x1f,
+};
+
+struct sli4_cmd_release_xri {
+ struct sli4_mbox_command_header hdr;
+ __le16 rel_xri_count_word;
+ __le16 xri_count_word;
+
+ struct {
+ __le16 xri_tag0;
+ __le16 xri_tag1;
+ } xri_tbl[62];
+};
+
+/* READ_CONFIG - read SLI port configuration parameters */
+struct sli4_cmd_read_config {
+ struct sli4_mbox_command_header hdr;
+};
+
+enum sli4_read_cfg_resp_flags {
+ SLI4_READ_CFG_RESP_RESOURCE_EXT = 0x80000000, /* DW1 */
+ SLI4_READ_CFG_RESP_TOPOLOGY = 0xff000000, /* DW2 */
+};
+
+enum sli4_read_cfg_topo {
+ SLI4_READ_CFG_TOPO_FC = 0x1, /* FC topology unknown */
+ SLI4_READ_CFG_TOPO_NON_FC_AL = 0x2, /* FC point-to-point or fabric */
+ SLI4_READ_CFG_TOPO_FC_AL = 0x3, /* FC-AL topology */
+};
+
+/* Link Module Type */
+enum sli4_read_cfg_lmt {
+ SLI4_LINK_MODULE_TYPE_1GB = 0x0004,
+ SLI4_LINK_MODULE_TYPE_2GB = 0x0008,
+ SLI4_LINK_MODULE_TYPE_4GB = 0x0040,
+ SLI4_LINK_MODULE_TYPE_8GB = 0x0080,
+ SLI4_LINK_MODULE_TYPE_16GB = 0x0200,
+ SLI4_LINK_MODULE_TYPE_32GB = 0x0400,
+ SLI4_LINK_MODULE_TYPE_64GB = 0x0800,
+ SLI4_LINK_MODULE_TYPE_128GB = 0x1000,
+};
+
+struct sli4_rsp_read_config {
+ struct sli4_mbox_command_header hdr;
+ __le32 ext_dword;
+ __le32 topology_dword;
+ __le32 resvd8;
+ __le16 e_d_tov;
+ __le16 resvd14;
+ __le32 resvd16;
+ __le16 r_a_tov;
+ __le16 resvd22;
+ __le32 resvd24;
+ __le32 resvd28;
+ __le16 lmt;
+ __le16 resvd34;
+ __le32 resvd36;
+ __le32 resvd40;
+ __le16 xri_base;
+ __le16 xri_count;
+ __le16 rpi_base;
+ __le16 rpi_count;
+ __le16 vpi_base;
+ __le16 vpi_count;
+ __le16 vfi_base;
+ __le16 vfi_count;
+ __le16 resvd60;
+ __le16 fcfi_count;
+ __le16 rq_count;
+ __le16 eq_count;
+ __le16 wq_count;
+ __le16 cq_count;
+ __le32 pad[45];
+};
+
+/* READ_NVPARMS - read the Port's non-volatile parameters */
+enum sli4_read_nvparms_flags {
+ SLI4_READ_NVPARAMS_HARD_ALPA = 0xff,
+ SLI4_READ_NVPARAMS_PREFERRED_D_ID = 0xffffff00,
+};
+
+struct sli4_cmd_read_nvparms {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ __le32 resvd8;
+ __le32 resvd12;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ __le32 hard_alpa_d_id;
+};
+
+/* WRITE_NVPARMS - write the Port's non-volatile parameters */
+struct sli4_cmd_write_nvparms {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ __le32 resvd8;
+ __le32 resvd12;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ __le32 hard_alpa_d_id;
+};
+
+/* READ_REV - read the Port revision levels */
+enum {
+ SLI4_READ_REV_FLAG_SLI_LEVEL = 0xf,
+ SLI4_READ_REV_FLAG_FCOEM = 0x10,
+ SLI4_READ_REV_FLAG_CEEV = 0x60,
+ SLI4_READ_REV_FLAG_VPD = 0x2000,
+
+ SLI4_READ_REV_AVAILABLE_LENGTH = 0xffffff,
+};
+
+struct sli4_cmd_read_rev {
+ struct sli4_mbox_command_header hdr;
+ __le16 resvd0;
+ __le16 flags0_word;
+ __le32 first_hw_rev;
+ __le32 second_hw_rev;
+ __le32 resvd12;
+ __le32 third_hw_rev;
+ u8 fc_ph_low;
+ u8 fc_ph_high;
+ u8 feature_level_low;
+ u8 feature_level_high;
+ __le32 resvd24;
+ __le32 first_fw_id;
+ u8 first_fw_name[16];
+ __le32 second_fw_id;
+ u8 second_fw_name[16];
+ __le32 rsvd18[30];
+ __le32 available_length_dword;
+ struct sli4_dmaaddr hostbuf;
+ __le32 returned_vpd_length;
+ __le32 actual_vpd_length;
+};
+
+/* READ_SPARM64 - read the Port service parameters */
+#define SLI4_READ_SPARM64_WWPN_OFFSET (4 * sizeof(u32))
+#define SLI4_READ_SPARM64_WWNN_OFFSET (6 * sizeof(u32))
+
+struct sli4_cmd_read_sparm64 {
+ struct sli4_mbox_command_header hdr;
+ __le32 resvd0;
+ __le32 resvd4;
+ struct sli4_bde bde_64;
+ __le16 vpi;
+ __le16 resvd22;
+ __le16 port_name_start;
+ __le16 port_name_len;
+ __le16 node_name_start;
+ __le16 node_name_len;
+};
+
+/* READ_TOPOLOGY - read the link event information */
+enum sli4_read_topo_e {
+ SLI4_READTOPO_ATTEN_TYPE = 0xff,
+ SLI4_READTOPO_FLAG_IL = 0x100,
+ SLI4_READTOPO_FLAG_PB_RECVD = 0x200,
+
+ SLI4_READTOPO_LINKSTATE_RECV = 0x3,
+ SLI4_READTOPO_LINKSTATE_TRANS = 0xc,
+ SLI4_READTOPO_LINKSTATE_MACHINE = 0xf0,
+ SLI4_READTOPO_LINKSTATE_SPEED = 0xff00,
+ SLI4_READTOPO_LINKSTATE_TF = 0x40000000,
+ SLI4_READTOPO_LINKSTATE_LU = 0x80000000,
+
+ SLI4_READTOPO_SCN_BBSCN = 0xf,
+ SLI4_READTOPO_SCN_CBBSCN = 0xf0,
+
+ SLI4_READTOPO_R_T_TOV = 0x1ff,
+ SLI4_READTOPO_AL_TOV = 0xf000,
+
+ SLI4_READTOPO_PB_FLAG = 0x80,
+
+ SLI4_READTOPO_INIT_N_PORTID = 0xffffff,
+};
+
+#define SLI4_MIN_LOOP_MAP_BYTES 128
+
+struct sli4_cmd_read_topology {
+ struct sli4_mbox_command_header hdr;
+ __le32 event_tag;
+ __le32 dw2_attentype;
+ u8 topology;
+ u8 lip_type;
+ u8 lip_al_ps;
+ u8 al_pa_granted;
+ struct sli4_bde bde_loop_map;
+ __le32 linkdown_state;
+ __le32 currlink_state;
+ u8 max_bbc;
+ u8 init_bbc;
+ u8 scn_flags;
+ u8 rsvd39;
+ __le16 dw10w0_al_rt_tov;
+ __le16 lp_tov;
+ u8 acquired_al_pa;
+ u8 pb_flags;
+ __le16 specified_al_pa;
+ __le32 dw12_init_n_port_id;
+};
+
+enum sli4_read_topo_link {
+ SLI4_READ_TOPOLOGY_LINK_UP = 0x1,
+ SLI4_READ_TOPOLOGY_LINK_DOWN,
+ SLI4_READ_TOPOLOGY_LINK_NO_ALPA,
+};
+
+enum sli4_read_topo {
+ SLI4_READ_TOPO_UNKNOWN = 0x0,
+ SLI4_READ_TOPO_NON_FC_AL,
+ SLI4_READ_TOPO_FC_AL,
+};
+
+enum sli4_read_topo_speed {
+ SLI4_READ_TOPOLOGY_SPEED_NONE = 0x00,
+ SLI4_READ_TOPOLOGY_SPEED_1G = 0x04,
+ SLI4_READ_TOPOLOGY_SPEED_2G = 0x08,
+ SLI4_READ_TOPOLOGY_SPEED_4G = 0x10,
+ SLI4_READ_TOPOLOGY_SPEED_8G = 0x20,
+ SLI4_READ_TOPOLOGY_SPEED_10G = 0x40,
+ SLI4_READ_TOPOLOGY_SPEED_16G = 0x80,
+ SLI4_READ_TOPOLOGY_SPEED_32G = 0x90,
+ SLI4_READ_TOPOLOGY_SPEED_64G = 0xa0,
+ SLI4_READ_TOPOLOGY_SPEED_128G = 0xb0,
+};
+
+/* REG_FCFI - activate an FC Forwarder */
+struct sli4_cmd_reg_fcfi_rq_cfg {
+ u8 r_ctl_mask;
+ u8 r_ctl_match;
+ u8 type_mask;
+ u8 type_match;
+};
+
+enum sli4_regfcfi_tag {
+ SLI4_REGFCFI_VLAN_TAG = 0xfff,
+ SLI4_REGFCFI_VLANTAG_VALID = 0x1000,
+};
+
+#define SLI4_CMD_REG_FCFI_NUM_RQ_CFG 4
+struct sli4_cmd_reg_fcfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 fcf_index;
+ __le16 fcfi;
+ __le16 rqid1;
+ __le16 rqid0;
+ __le16 rqid3;
+ __le16 rqid2;
+ struct sli4_cmd_reg_fcfi_rq_cfg
+ rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
+ __le32 dw8_vlan;
+};
+
+#define SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG 4
+#define SLI4_CMD_REG_FCFI_MRQ_MAX_NUM_RQ 32
+#define SLI4_CMD_REG_FCFI_SET_FCFI_MODE 0
+#define SLI4_CMD_REG_FCFI_SET_MRQ_MODE 1
+
+enum sli4_reg_fcfi_mrq {
+ SLI4_REGFCFI_MRQ_VLAN_TAG = 0xfff,
+ SLI4_REGFCFI_MRQ_VLANTAG_VALID = 0x1000,
+ SLI4_REGFCFI_MRQ_MODE = 0x2000,
+
+ SLI4_REGFCFI_MRQ_MASK_NUM_PAIRS = 0xff,
+ SLI4_REGFCFI_MRQ_FILTER_BITMASK = 0xf00,
+ SLI4_REGFCFI_MRQ_RQ_SEL_POLICY = 0xf000,
+};
+
+struct sli4_cmd_reg_fcfi_mrq {
+ struct sli4_mbox_command_header hdr;
+ __le16 fcf_index;
+ __le16 fcfi;
+ __le16 rqid1;
+ __le16 rqid0;
+ __le16 rqid3;
+ __le16 rqid2;
+ struct sli4_cmd_reg_fcfi_rq_cfg
+ rq_cfg[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
+ __le32 dw8_vlan;
+ __le32 dw9_mrqflags;
+};
+
+struct sli4_cmd_rq_cfg {
+ __le16 rq_id;
+ u8 r_ctl_mask;
+ u8 r_ctl_match;
+ u8 type_mask;
+ u8 type_match;
+};
+
+/* REG_RPI - register a Remote Port Indicator */
+enum sli4_reg_rpi {
+ SLI4_REGRPI_REMOTE_N_PORTID = 0xffffff, /* DW2 */
+ SLI4_REGRPI_UPD = 0x1000000,
+ SLI4_REGRPI_ETOW = 0x8000000,
+ SLI4_REGRPI_TERP = 0x20000000,
+ SLI4_REGRPI_CI = 0x80000000,
+};
+
+struct sli4_cmd_reg_rpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 rpi;
+ __le16 rsvd2;
+ __le32 dw2_rportid_flags;
+ struct sli4_bde bde_64;
+ __le16 vpi;
+ __le16 rsvd26;
+};
+
+#define SLI4_REG_RPI_BUF_LEN 0x70
+
+/* REG_VFI - register a Virtual Fabric Indicator */
+enum sli_reg_vfi {
+ SLI4_REGVFI_VP = 0x1000, /* DW1 */
+ SLI4_REGVFI_UPD = 0x2000,
+
+ SLI4_REGVFI_LOCAL_N_PORTID = 0xffffff, /* DW10 */
+};
+
+struct sli4_cmd_reg_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le16 vfi;
+ __le16 dw0w1_flags;
+ __le16 fcfi;
+ __le16 vpi;
+ u8 wwpn[8];
+ struct sli4_bde sparm;
+ __le32 e_d_tov;
+ __le32 r_a_tov;
+ __le32 dw10_lportid_flags;
+};
+
+/* REG_VPI - register a Virtual Port Indicator */
+enum sli4_reg_vpi {
+ SLI4_REGVPI_LOCAL_N_PORTID = 0xffffff,
+ SLI4_REGVPI_UPD = 0x1000000,
+};
+
+struct sli4_cmd_reg_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 dw2_lportid_flags;
+ u8 wwpn[8];
+ __le32 rsvd12;
+ __le16 vpi;
+ __le16 vfi;
+};
+
+/* REQUEST_FEATURES - request / query SLI features */
+enum sli4_req_features_flags {
+ SLI4_REQFEAT_QRY = 0x1, /* DW1 */
+
+ SLI4_REQFEAT_IAAB = 1 << 0, /* DW2 & DW3 */
+ SLI4_REQFEAT_NPIV = 1 << 1,
+ SLI4_REQFEAT_DIF = 1 << 2,
+ SLI4_REQFEAT_VF = 1 << 3,
+ SLI4_REQFEAT_FCPI = 1 << 4,
+ SLI4_REQFEAT_FCPT = 1 << 5,
+ SLI4_REQFEAT_FCPC = 1 << 6,
+ SLI4_REQFEAT_RSVD = 1 << 7,
+ SLI4_REQFEAT_RQD = 1 << 8,
+ SLI4_REQFEAT_IAAR = 1 << 9,
+ SLI4_REQFEAT_HLM = 1 << 10,
+ SLI4_REQFEAT_PERFH = 1 << 11,
+ SLI4_REQFEAT_RXSEQ = 1 << 12,
+ SLI4_REQFEAT_RXRI = 1 << 13,
+ SLI4_REQFEAT_DCL2 = 1 << 14,
+ SLI4_REQFEAT_RSCO = 1 << 15,
+ SLI4_REQFEAT_MRQP = 1 << 16,
+};
+
+struct sli4_cmd_request_features {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_qry;
+ __le32 cmd;
+ __le32 resp;
+};
+
+/*
+ * SLI_CONFIG - submit a configuration command to Port
+ *
+ * Command is either embedded as part of the payload (embed) or located
+ * in a separate memory buffer (mem)
+ */
+enum sli4_sli_config {
+ SLI4_SLICONF_EMB = 0x1, /* DW1 */
+ SLI4_SLICONF_PMDCMD_SHIFT = 3,
+ SLI4_SLICONF_PMDCMD_MASK = 0xf8,
+ SLI4_SLICONF_PMDCMD_VAL_1 = 8,
+ SLI4_SLICONF_PMDCNT = 0xf8,
+
+ SLI4_SLICONF_PMD_LEN = 0x00ffffff,
+};
+
+struct sli4_cmd_sli_config {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 payload_len;
+ __le32 rsvd12[3];
+ union {
+ u8 embed[58 * sizeof(u32)];
+ struct sli4_bufptr mem;
+ } payload;
+};
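+
+/*
+ * Sketch of the embed/mem split described above (hypothetical helper name):
+ * the EMB bit in DW1 selects between the inline embed[] area and the
+ * external buffer pointer.
+ */
+static inline bool
+sli_config_is_embedded(struct sli4_cmd_sli_config *sli_config)
+{
+ return (le32_to_cpu(sli_config->dw1_flags) & SLI4_SLICONF_EMB) != 0;
+}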
+
+/* READ_STATUS - read tx/rx status of a particular port */
+#define SLI4_READSTATUS_CLEAR_COUNTERS 0x1
+
+struct sli4_cmd_read_status {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 rsvd4;
+ __le32 trans_kbyte_cnt;
+ __le32 recv_kbyte_cnt;
+ __le32 trans_frame_cnt;
+ __le32 recv_frame_cnt;
+ __le32 trans_seq_cnt;
+ __le32 recv_seq_cnt;
+ __le32 tot_exchanges_orig;
+ __le32 tot_exchanges_resp;
+ __le32 recv_p_bsy_cnt;
+ __le32 recv_f_bsy_cnt;
+ __le32 no_rq_buf_dropped_frames_cnt;
+ __le32 empty_rq_timeout_cnt;
+ __le32 no_xri_dropped_frames_cnt;
+ __le32 empty_xri_pool_cnt;
+};
+
+/* READ_LNK_STAT - read link status of a particular port */
+enum sli4_read_link_stats_flags {
+ SLI4_READ_LNKSTAT_REC = 1u << 0,
+ SLI4_READ_LNKSTAT_GEC = 1u << 1,
+ SLI4_READ_LNKSTAT_W02OF = 1u << 2,
+ SLI4_READ_LNKSTAT_W03OF = 1u << 3,
+ SLI4_READ_LNKSTAT_W04OF = 1u << 4,
+ SLI4_READ_LNKSTAT_W05OF = 1u << 5,
+ SLI4_READ_LNKSTAT_W06OF = 1u << 6,
+ SLI4_READ_LNKSTAT_W07OF = 1u << 7,
+ SLI4_READ_LNKSTAT_W08OF = 1u << 8,
+ SLI4_READ_LNKSTAT_W09OF = 1u << 9,
+ SLI4_READ_LNKSTAT_W10OF = 1u << 10,
+ SLI4_READ_LNKSTAT_W11OF = 1u << 11,
+ SLI4_READ_LNKSTAT_W12OF = 1u << 12,
+ SLI4_READ_LNKSTAT_W13OF = 1u << 13,
+ SLI4_READ_LNKSTAT_W14OF = 1u << 14,
+ SLI4_READ_LNKSTAT_W15OF = 1u << 15,
+ SLI4_READ_LNKSTAT_W16OF = 1u << 16,
+ SLI4_READ_LNKSTAT_W17OF = 1u << 17,
+ SLI4_READ_LNKSTAT_W18OF = 1u << 18,
+ SLI4_READ_LNKSTAT_W19OF = 1u << 19,
+ SLI4_READ_LNKSTAT_W20OF = 1u << 20,
+ SLI4_READ_LNKSTAT_W21OF = 1u << 21,
+ SLI4_READ_LNKSTAT_CLRC = 1u << 30,
+ SLI4_READ_LNKSTAT_CLOF = 1u << 31,
+};
+
+struct sli4_cmd_read_link_stats {
+ struct sli4_mbox_command_header hdr;
+ __le32 dw1_flags;
+ __le32 linkfail_errcnt;
+ __le32 losssync_errcnt;
+ __le32 losssignal_errcnt;
+ __le32 primseq_errcnt;
+ __le32 inval_txword_errcnt;
+ __le32 crc_errcnt;
+ __le32 primseq_eventtimeout_cnt;
+ __le32 elastic_bufoverrun_errcnt;
+ __le32 arbit_fc_al_timeout_cnt;
+ __le32 adv_rx_buftor_to_buf_credit;
+ __le32 curr_rx_buf_to_buf_credit;
+ __le32 adv_tx_buf_to_buf_credit;
+ __le32 curr_tx_buf_to_buf_credit;
+ __le32 rx_eofa_cnt;
+ __le32 rx_eofdti_cnt;
+ __le32 rx_eofni_cnt;
+ __le32 rx_soff_cnt;
+ __le32 rx_dropped_no_aer_cnt;
+ __le32 rx_dropped_no_avail_rpi_rescnt;
+ __le32 rx_dropped_no_avail_xri_rescnt;
+};
+
+/* Format a WQE with WQ_ID Association performance hint */
+static inline void
+sli_set_wq_id_association(void *entry, u16 q_id)
+{
+ u32 *wqe = entry;
+
+ /*
+ * Set Word 10, bit 0 to zero
+ * Set Word 10, bits 15:1 to the WQ ID
+ */
+ wqe[10] &= ~0xffff;
+ wqe[10] |= q_id << 1;
+}
+
+/* UNREG_FCFI - unregister an FCFI */
+struct sli4_cmd_unreg_fcfi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 fcfi;
+ __le16 rsvd6;
+};
+
+/* UNREG_RPI - unregister one or more RPI */
+enum sli4_unreg_rpi {
+ SLI4_UNREG_RPI_DP = 0x2000,
+ SLI4_UNREG_RPI_II_SHIFT = 14,
+ SLI4_UNREG_RPI_II_MASK = 0xc000,
+ SLI4_UNREG_RPI_II_RPI = 0x0000,
+ SLI4_UNREG_RPI_II_VPI = 0x4000,
+ SLI4_UNREG_RPI_II_VFI = 0x8000,
+ SLI4_UNREG_RPI_II_FCFI = 0xc000,
+
+ SLI4_UNREG_RPI_DEST_N_PORTID_MASK = 0x00ffffff,
+};
+
+struct sli4_cmd_unreg_rpi {
+ struct sli4_mbox_command_header hdr;
+ __le16 index;
+ __le16 dw1w1_flags;
+ __le32 dw2_dest_n_portid;
+};
+
+/* UNREG_VFI - unregister one or more VFI */
+enum sli4_unreg_vfi {
+ SLI4_UNREG_VFI_II_SHIFT = 14,
+ SLI4_UNREG_VFI_II_MASK = 0xc000,
+ SLI4_UNREG_VFI_II_VFI = 0x0000,
+ SLI4_UNREG_VFI_II_FCFI = 0xc000,
+};
+
+struct sli4_cmd_unreg_vfi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 index;
+ __le16 dw2_flags;
+};
+
+enum sli4_unreg_type {
+ SLI4_UNREG_TYPE_PORT,
+ SLI4_UNREG_TYPE_DOMAIN,
+ SLI4_UNREG_TYPE_FCF,
+ SLI4_UNREG_TYPE_ALL
+};
+
+/* UNREG_VPI - unregister one or more VPI */
+enum sli4_unreg_vpi {
+ SLI4_UNREG_VPI_II_SHIFT = 14,
+ SLI4_UNREG_VPI_II_MASK = 0xc000,
+ SLI4_UNREG_VPI_II_VPI = 0x0000,
+ SLI4_UNREG_VPI_II_VFI = 0x8000,
+ SLI4_UNREG_VPI_II_FCFI = 0xc000,
+};
+
+struct sli4_cmd_unreg_vpi {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le16 index;
+ __le16 dw2w0_flags;
+};
+
+/* AUTO_XFER_RDY - Configure the auto-generate XFER-RDY feature */
+struct sli4_cmd_config_auto_xfer_rdy {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 max_burst_len;
+};
+
+#define SLI4_CONFIG_AUTO_XFERRDY_BLKSIZE 0xffff
+
+struct sli4_cmd_config_auto_xfer_rdy_hp {
+ struct sli4_mbox_command_header hdr;
+ __le32 rsvd0;
+ __le32 max_burst_len;
+ __le32 dw3_esoc_flags;
+ __le16 block_size;
+ __le16 rsvd14;
+};
+
+/*************************************************************************
+ * SLI-4 common configuration command formats and definitions
+ */
+
+/*
+ * Subsystem values.
+ */
+enum sli4_subsystem {
+ SLI4_SUBSYSTEM_COMMON = 0x01,
+ SLI4_SUBSYSTEM_LOWLEVEL = 0x0b,
+ SLI4_SUBSYSTEM_FC = 0x0c,
+ SLI4_SUBSYSTEM_DMTF = 0x11,
+};
+
+#define SLI4_OPC_LOWLEVEL_SET_WATCHDOG 0x36
+
+/*
+ * Common opcode (OPC) values.
+ */
+enum sli4_cmn_opcode {
+ SLI4_CMN_FUNCTION_RESET = 0x3d,
+ SLI4_CMN_CREATE_CQ = 0x0c,
+ SLI4_CMN_CREATE_CQ_SET = 0x1d,
+ SLI4_CMN_DESTROY_CQ = 0x36,
+ SLI4_CMN_MODIFY_EQ_DELAY = 0x29,
+ SLI4_CMN_CREATE_EQ = 0x0d,
+ SLI4_CMN_DESTROY_EQ = 0x37,
+ SLI4_CMN_CREATE_MQ_EXT = 0x5a,
+ SLI4_CMN_DESTROY_MQ = 0x35,
+ SLI4_CMN_GET_CNTL_ATTRIBUTES = 0x20,
+ SLI4_CMN_NOP = 0x21,
+ SLI4_CMN_GET_RSC_EXTENT_INFO = 0x9a,
+ SLI4_CMN_GET_SLI4_PARAMS = 0xb5,
+ SLI4_CMN_QUERY_FW_CONFIG = 0x3a,
+ SLI4_CMN_GET_PORT_NAME = 0x4d,
+
+ SLI4_CMN_WRITE_FLASHROM = 0x07,
+ /* TRANSCEIVER Data */
+ SLI4_CMN_READ_TRANS_DATA = 0x49,
+ SLI4_CMN_GET_CNTL_ADDL_ATTRS = 0x79,
+ SLI4_CMN_GET_FUNCTION_CFG = 0xa0,
+ SLI4_CMN_GET_PROFILE_CFG = 0xa4,
+ SLI4_CMN_SET_PROFILE_CFG = 0xa5,
+ SLI4_CMN_GET_PROFILE_LIST = 0xa6,
+ SLI4_CMN_GET_ACTIVE_PROFILE = 0xa7,
+ SLI4_CMN_SET_ACTIVE_PROFILE = 0xa8,
+ SLI4_CMN_READ_OBJECT = 0xab,
+ SLI4_CMN_WRITE_OBJECT = 0xac,
+ SLI4_CMN_DELETE_OBJECT = 0xae,
+ SLI4_CMN_READ_OBJECT_LIST = 0xad,
+ SLI4_CMN_SET_DUMP_LOCATION = 0xb8,
+ SLI4_CMN_SET_FEATURES = 0xbf,
+ SLI4_CMN_GET_RECFG_LINK_INFO = 0xc9,
+ SLI4_CMN_SET_RECNG_LINK_ID = 0xca,
+};
+
+/* DMTF opcode (OPC) values */
+#define DMTF_EXEC_CLP_CMD 0x01
+
+/*
+ * COMMON_FUNCTION_RESET
+ *
+ * Resets the Port, returning it to a power-on state. This configuration
+ * command does not have a payload and should set/expect the lengths to
+ * be zero.
+ */
+struct sli4_rqst_cmn_function_reset {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_function_reset {
+ struct sli4_rsp_hdr hdr;
+};
+
+/*
+ * COMMON_GET_CNTL_ATTRIBUTES
+ *
+ * Query for information about the SLI Port
+ */
+enum sli4_cntrl_attr_flags {
+ SLI4_CNTL_ATTR_PORTNUM = 0x3f,
+ SLI4_CNTL_ATTR_PORTTYPE = 0xc0,
+};
+
+struct sli4_rsp_cmn_get_cntl_attributes {
+ struct sli4_rsp_hdr hdr;
+ u8 version_str[32];
+ u8 manufacturer_name[32];
+ __le32 supported_modes;
+ u8 eprom_version_lo;
+ u8 eprom_version_hi;
+ __le16 rsvd17;
+ __le32 mbx_ds_version;
+ __le32 ep_fw_ds_version;
+ u8 ncsi_version_str[12];
+ __le32 def_extended_timeout;
+ u8 model_number[32];
+ u8 description[64];
+ u8 serial_number[32];
+ u8 ip_version_str[32];
+ u8 fw_version_str[32];
+ u8 bios_version_str[32];
+ u8 redboot_version_str[32];
+ u8 driver_version_str[32];
+ u8 fw_on_flash_version_str[32];
+ __le32 functionalities_supported;
+ __le16 max_cdb_length;
+ u8 asic_revision;
+ u8 generational_guid0;
+ __le32 generational_guid1_12[3];
+ __le16 generational_guid13_14;
+ u8 generational_guid15;
+ u8 hba_port_count;
+ __le16 default_link_down_timeout;
+ u8 iscsi_version_min_max;
+ u8 multifunctional_device;
+ u8 cache_valid;
+ u8 hba_status;
+ u8 max_domains_supported;
+ u8 port_num_type_flags;
+ __le32 firmware_post_status;
+ __le32 hba_mtu;
+ u8 iscsi_features;
+ u8 rsvd121[3];
+ __le16 pci_vendor_id;
+ __le16 pci_device_id;
+ __le16 pci_sub_vendor_id;
+ __le16 pci_sub_system_id;
+ u8 pci_bus_number;
+ u8 pci_device_number;
+ u8 pci_function_number;
+ u8 interface_type;
+ __le64 unique_identifier;
+ u8 number_of_netfilters;
+ u8 rsvd122[3];
+};
+
+/*
+ * COMMON_GET_CNTL_ADDL_ATTRIBUTES
+ *
+ * This command queries the controller information from the Flash ROM.
+ */
+struct sli4_rqst_cmn_get_cntl_addl_attributes {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_cntl_addl_attributes {
+ struct sli4_rsp_hdr hdr;
+ __le16 ipl_file_number;
+ u8 ipl_file_version;
+ u8 rsvd4;
+ u8 on_die_temperature;
+ u8 rsvd5[3];
+ __le32 driver_advanced_features_supported;
+ __le32 rsvd7[4];
+ char universal_bios_version[32];
+ char x86_bios_version[32];
+ char efi_bios_version[32];
+ char fcode_version[32];
+ char uefi_bios_version[32];
+ char uefi_nic_version[32];
+ char uefi_fcode_version[32];
+ char uefi_iscsi_version[32];
+ char iscsi_x86_bios_version[32];
+ char pxe_x86_bios_version[32];
+ u8 default_wwpn[8];
+ u8 ext_phy_version[32];
+ u8 fc_universal_bios_version[32];
+ u8 fc_x86_bios_version[32];
+ u8 fc_efi_bios_version[32];
+ u8 fc_fcode_version[32];
+ u8 ext_phy_crc_label[8];
+ u8 ipl_file_name[16];
+ u8 rsvd139[72];
+};
+
+/*
+ * COMMON_NOP
+ *
+ * This command does not do anything; it only returns
+ * the payload in the completion.
+ */
+struct sli4_rqst_cmn_nop {
+ struct sli4_rqst_hdr hdr;
+ __le32 context[2];
+};
+
+struct sli4_rsp_cmn_nop {
+ struct sli4_rsp_hdr hdr;
+ __le32 context[2];
+};
+
+struct sli4_rqst_cmn_get_resource_extent_info {
+ struct sli4_rqst_hdr hdr;
+ __le16 resource_type;
+ __le16 rsvd16;
+};
+
+enum sli4_rsc_type {
+ SLI4_RSC_TYPE_VFI = 0x20,
+ SLI4_RSC_TYPE_VPI = 0x21,
+ SLI4_RSC_TYPE_RPI = 0x22,
+ SLI4_RSC_TYPE_XRI = 0x23,
+};
+
+struct sli4_rsp_cmn_get_resource_extent_info {
+ struct sli4_rsp_hdr hdr;
+ __le16 resource_extent_count;
+ __le16 resource_extent_size;
+};
+
+#define SLI4_128BYTE_WQE_SUPPORT 0x02
+
+#define GET_Q_CNT_METHOD(m) \
+ (((m) & SLI4_PARAM_Q_CNT_MTHD_MASK) >> SLI4_PARAM_Q_CNT_MTHD_SHFT)
+#define GET_Q_CREATE_VERSION(v) \
+ (((v) & SLI4_PARAM_QV_MASK) >> SLI4_PARAM_QV_SHIFT)
+
+enum sli4_rsp_get_params_e {
+ /* GENERIC */
+ SLI4_PARAM_Q_CNT_MTHD_SHFT = 24,
+ SLI4_PARAM_Q_CNT_MTHD_MASK = 0xf << 24,
+ SLI4_PARAM_QV_SHIFT = 14,
+ SLI4_PARAM_QV_MASK = 3 << 14,
+
+ /* DW4 */
+ SLI4_PARAM_PROTO_TYPE_MASK = 0xff,
+ /* DW5 */
+ SLI4_PARAM_FT = 1 << 0,
+ SLI4_PARAM_SLI_REV_MASK = 0xf << 4,
+ SLI4_PARAM_SLI_FAM_MASK = 0xf << 8,
+ SLI4_PARAM_IF_TYPE_MASK = 0xf << 12,
+ SLI4_PARAM_SLI_HINT1_MASK = 0xff << 16,
+ SLI4_PARAM_SLI_HINT2_MASK = 0x1f << 24,
+ /* DW6 */
+ SLI4_PARAM_EQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_EQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_EQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW8 */
+ SLI4_PARAM_CQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_CQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_CQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW10 */
+ SLI4_PARAM_MQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_MQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW12 */
+ SLI4_PARAM_WQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_WQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_WQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW14 */
+ SLI4_PARAM_RQ_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_RQE_SZS_MASK = 0xf << 8,
+ SLI4_PARAM_RQ_PAGE_SZS_MASK = 0xff << 16,
+ /* DW15W1 */
+ SLI4_PARAM_RQ_DB_WINDOW_MASK = 0xf000,
+ /* DW16 */
+ SLI4_PARAM_FC = 1 << 0,
+ SLI4_PARAM_EXT = 1 << 1,
+ SLI4_PARAM_HDRR = 1 << 2,
+ SLI4_PARAM_SGLR = 1 << 3,
+ SLI4_PARAM_FBRR = 1 << 4,
+ SLI4_PARAM_AREG = 1 << 5,
+ SLI4_PARAM_TGT = 1 << 6,
+ SLI4_PARAM_TERP = 1 << 7,
+ SLI4_PARAM_ASSI = 1 << 8,
+ SLI4_PARAM_WCHN = 1 << 9,
+ SLI4_PARAM_TCCA = 1 << 10,
+ SLI4_PARAM_TRTY = 1 << 11,
+ SLI4_PARAM_TRIR = 1 << 12,
+ SLI4_PARAM_PHOFF = 1 << 13,
+ SLI4_PARAM_PHON = 1 << 14,
+ SLI4_PARAM_PHWQ = 1 << 15,
+ SLI4_PARAM_BOUND_4GA = 1 << 16,
+ SLI4_PARAM_RXC = 1 << 17,
+ SLI4_PARAM_HLM = 1 << 18,
+ SLI4_PARAM_IPR = 1 << 19,
+ SLI4_PARAM_RXRI = 1 << 20,
+ SLI4_PARAM_SGLC = 1 << 21,
+ SLI4_PARAM_TIMM = 1 << 22,
+ SLI4_PARAM_TSMM = 1 << 23,
+ SLI4_PARAM_OAS = 1 << 25,
+ SLI4_PARAM_LC = 1 << 26,
+ SLI4_PARAM_AGXF = 1 << 27,
+ SLI4_PARAM_LOOPBACK_MASK = 0xf << 28,
+ /* DW18 */
+ SLI4_PARAM_SGL_PAGE_CNT_MASK = 0xf << 0,
+ SLI4_PARAM_SGL_PAGE_SZS_MASK = 0xff << 8,
+ SLI4_PARAM_SGL_PP_ALIGN_MASK = 0xff << 16,
+};
+
+struct sli4_rqst_cmn_get_sli4_params {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_sli4_params {
+ struct sli4_rsp_hdr hdr;
+ __le32 dw4_protocol_type;
+ __le32 dw5_sli;
+ __le32 dw6_eq_page_cnt;
+ __le16 eqe_count_mask;
+ __le16 rsvd26;
+ __le32 dw8_cq_page_cnt;
+ __le16 cqe_count_mask;
+ __le16 rsvd34;
+ __le32 dw10_mq_page_cnt;
+ __le16 mqe_count_mask;
+ __le16 rsvd42;
+ __le32 dw12_wq_page_cnt;
+ __le16 wqe_count_mask;
+ __le16 rsvd50;
+ __le32 dw14_rq_page_cnt;
+ __le16 rqe_count_mask;
+ __le16 dw15w1_rq_db_window;
+ __le32 dw16_loopback_scope;
+ __le32 sge_supported_length;
+ __le32 dw18_sgl_page_cnt;
+ __le16 min_rq_buffer_size;
+ __le16 rsvd75;
+ __le32 max_rq_buffer_size;
+ __le16 physical_xri_max;
+ __le16 physical_rpi_max;
+ __le16 physical_vpi_max;
+ __le16 physical_vfi_max;
+ __le32 rsvd88;
+ __le16 frag_num_field_offset;
+ __le16 frag_num_field_size;
+ __le16 sgl_index_field_offset;
+ __le16 sgl_index_field_size;
+ __le32 chain_sge_initial_value_lo;
+ __le32 chain_sge_initial_value_hi;
+};
+
+/* Port Types */
+enum sli4_port_types {
+ SLI4_PORT_TYPE_ETH = 0,
+ SLI4_PORT_TYPE_FC = 1,
+};
+
+struct sli4_rqst_cmn_get_port_name {
+ struct sli4_rqst_hdr hdr;
+ u8 port_type;
+ u8 rsvd4[3];
+};
+
+struct sli4_rsp_cmn_get_port_name {
+ struct sli4_rsp_hdr hdr;
+ char port_name[4];
+};
+
+struct sli4_rqst_cmn_write_flashrom {
+ struct sli4_rqst_hdr hdr;
+ __le32 flash_rom_access_opcode;
+ __le32 flash_rom_access_operation_type;
+ __le32 data_buffer_size;
+ __le32 offset;
+ u8 data_buffer[4];
+};
+
+/*
+ * COMMON_READ_TRANSCEIVER_DATA
+ *
+ * This command reads SFF transceiver data (the format is defined
+ * by the SFF-8472 specification).
+ */
+struct sli4_rqst_cmn_read_transceiver_data {
+ struct sli4_rqst_hdr hdr;
+ __le32 page_number;
+ __le32 port;
+};
+
+struct sli4_rsp_cmn_read_transceiver_data {
+ struct sli4_rsp_hdr hdr;
+ __le32 page_number;
+ __le32 port;
+ u8 page_data[128];
+ u8 page_data_2[128];
+};
+
+#define SLI4_REQ_DESIRE_READLEN 0xffffff
+
+struct sli4_rqst_cmn_read_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_read_length_dword;
+ __le32 read_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+#define RSP_COM_READ_OBJ_EOF 0x80000000
+
+struct sli4_rsp_cmn_read_object {
+ struct sli4_rsp_hdr hdr;
+ __le32 actual_read_length;
+ __le32 eof_dword;
+};
+
+enum sli4_rqst_write_object_flags {
+ SLI4_RQ_DES_WRITE_LEN = 0xffffff,
+ SLI4_RQ_DES_WRITE_LEN_NOC = 0x40000000,
+ SLI4_RQ_DES_WRITE_LEN_EOF = 0x80000000,
+};
+
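+/*
+ * The desired_write_len_dword field combines the write length in its
+ * low 24 bits with the SLI4_RQ_DES_WRITE_LEN_NOC and
+ * SLI4_RQ_DES_WRITE_LEN_EOF flags defined above.
+ */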
+struct sli4_rqst_cmn_write_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_write_len_dword;
+ __le32 write_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+#define RSP_CHANGE_STATUS 0xff
+
+struct sli4_rsp_cmn_write_object {
+ struct sli4_rsp_hdr hdr;
+ __le32 actual_write_length;
+ __le32 change_status_dword;
+};
+
+struct sli4_rqst_cmn_delete_object {
+ struct sli4_rqst_hdr hdr;
+ __le32 rsvd4;
+ __le32 rsvd5;
+ u8 object_name[104];
+};
+
+#define SLI4_RQ_OBJ_LIST_READ_LEN 0xffffff
+
+struct sli4_rqst_cmn_read_object_list {
+ struct sli4_rqst_hdr hdr;
+ __le32 desired_read_length_dword;
+ __le32 read_offset;
+ u8 object_name[104];
+ __le32 host_buffer_descriptor_count;
+ struct sli4_bde host_buffer_descriptor[];
+};
+
+enum sli4_rqst_set_dump_flags {
+ SLI4_CMN_SET_DUMP_BUFFER_LEN = 0xffffff,
+ SLI4_CMN_SET_DUMP_FDB = 0x20000000,
+ SLI4_CMN_SET_DUMP_BLP = 0x40000000,
+ SLI4_CMN_SET_DUMP_QRY = 0x80000000,
+};
+
+struct sli4_rqst_cmn_set_dump_location {
+ struct sli4_rqst_hdr hdr;
+ __le32 buffer_length_dword;
+ __le32 buf_addr_low;
+ __le32 buf_addr_high;
+};
+
+struct sli4_rsp_cmn_set_dump_location {
+ struct sli4_rsp_hdr hdr;
+ __le32 buffer_length_dword;
+};
+
+enum sli4_dump_level {
+ SLI4_DUMP_LEVEL_NONE,
+ SLI4_CHIP_LEVEL_DUMP,
+ SLI4_FUNC_DESC_DUMP,
+};
+
+enum sli4_dump_state {
+ SLI4_DUMP_STATE_NONE,
+ SLI4_CHIP_DUMP_STATE_VALID,
+ SLI4_FUNC_DUMP_STATE_VALID,
+};
+
+enum sli4_dump_status {
+ SLI4_DUMP_READY_STATUS_NOT_READY,
+ SLI4_DUMP_READY_STATUS_DD_PRESENT,
+ SLI4_DUMP_READY_STATUS_FDB_PRESENT,
+ SLI4_DUMP_READY_STATUS_SKIP_DUMP,
+ SLI4_DUMP_READY_STATUS_FAILED = -1,
+};
+
+enum sli4_set_features {
+ SLI4_SET_FEATURES_DIF_SEED = 0x01,
+ SLI4_SET_FEATURES_XRI_TIMER = 0x03,
+ SLI4_SET_FEATURES_MAX_PCIE_SPEED = 0x04,
+ SLI4_SET_FEATURES_FCTL_CHECK = 0x05,
+ SLI4_SET_FEATURES_FEC = 0x06,
+ SLI4_SET_FEATURES_PCIE_RECV_DETECT = 0x07,
+ SLI4_SET_FEATURES_DIF_MEMORY_MODE = 0x08,
+ SLI4_SET_FEATURES_DISABLE_SLI_PORT_PAUSE_STATE = 0x09,
+ SLI4_SET_FEATURES_ENABLE_PCIE_OPTIONS = 0x0a,
+ SLI4_SET_FEAT_CFG_AUTO_XFER_RDY_T10PI = 0x0c,
+ SLI4_SET_FEATURES_ENABLE_MULTI_RECEIVE_QUEUE = 0x0d,
+ SLI4_SET_FEATURES_SET_FTD_XFER_HINT = 0x0f,
+ SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK = 0x11,
+};
+
+struct sli4_rqst_cmn_set_features {
+ struct sli4_rqst_hdr hdr;
+ __le32 feature;
+ __le32 param_len;
+ __le32 params[8];
+};
+
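+/*
+ * The params[] area of sli4_rqst_cmn_set_features carries one of the
+ * feature-specific parameter structures below, with param_len set to
+ * the size of the chosen structure.
+ */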
+struct sli4_rqst_cmn_set_features_dif_seed {
+ __le16 seed;
+ __le16 rsvd16;
+};
+
+enum sli4_rqst_set_mrq_features {
+ SLI4_RQ_MULTIRQ_ISR = 0x1,
+ SLI4_RQ_MULTIRQ_AUTOGEN_XFER_RDY = 0x2,
+
+ SLI4_RQ_MULTIRQ_NUM_RQS = 0xff,
+ SLI4_RQ_MULTIRQ_RQ_SELECT = 0xf00,
+};
+
+struct sli4_rqst_cmn_set_features_multirq {
+ __le32 auto_gen_xfer_dword;
+ __le32 num_rqs_dword;
+};
+
+enum sli4_rqst_health_check_flags {
+ SLI4_RQ_HEALTH_CHECK_ENABLE = 0x1,
+ SLI4_RQ_HEALTH_CHECK_QUERY = 0x2,
+};
+
+struct sli4_rqst_cmn_set_features_health_check {
+ __le32 health_check_dword;
+};
+
+struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint {
+ __le32 fdt_xfer_hint;
+};
+
+struct sli4_rqst_dmtf_exec_clp_cmd {
+ struct sli4_rqst_hdr hdr;
+ __le32 cmd_buf_length;
+ __le32 resp_buf_length;
+ __le32 cmd_buf_addr_low;
+ __le32 cmd_buf_addr_high;
+ __le32 resp_buf_addr_low;
+ __le32 resp_buf_addr_high;
+};
+
+struct sli4_rsp_dmtf_exec_clp_cmd {
+ struct sli4_rsp_hdr hdr;
+ __le32 rsvd4;
+ __le32 resp_length;
+ __le32 rsvd6;
+ __le32 rsvd7;
+ __le32 rsvd8;
+ __le32 rsvd9;
+ __le32 clp_status;
+ __le32 clp_detailed_status;
+};
+
+#define SLI4_PROTOCOL_FC 0x10
+#define SLI4_PROTOCOL_DEFAULT 0xff
+
+struct sli4_rspource_descriptor_v1 {
+ u8 descriptor_type;
+ u8 descriptor_length;
+ __le16 rsvd16;
+ __le32 type_specific[];
+};
+
+enum sli4_pcie_desc_flags {
+ SLI4_PCIE_DESC_IMM = 0x4000,
+ SLI4_PCIE_DESC_NOSV = 0x8000,
+
+ SLI4_PCIE_DESC_PF_NO = 0x3ff0000,
+
+ SLI4_PCIE_DESC_MISSN_ROLE = 0xff,
+ SLI4_PCIE_DESC_PCHG = 0x8000000,
+ SLI4_PCIE_DESC_SCHG = 0x10000000,
+ SLI4_PCIE_DESC_XCHG = 0x20000000,
+ SLI4_PCIE_DESC_XROM = 0xc0000000
+};
+
+struct sli4_pcie_resource_descriptor_v1 {
+ u8 descriptor_type;
+ u8 descriptor_length;
+ __le16 imm_nosv_dword;
+ __le32 pf_number_dword;
+ __le32 rsvd3;
+ u8 sriov_state;
+ u8 pf_state;
+ u8 pf_type;
+ u8 rsvd4;
+ __le16 number_of_vfs;
+ __le16 rsvd5;
+ __le32 mission_roles_dword;
+ __le32 rsvd7[16];
+};
+
+struct sli4_rqst_cmn_get_function_config {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_function_config {
+ struct sli4_rsp_hdr hdr;
+ __le32 desc_count;
+ __le32 desc[54];
+};
+
+/* Link Config Descriptor for link config functions */
+struct sli4_link_config_descriptor {
+ u8 link_config_id;
+ u8 rsvd1[3];
+ __le32 config_description[8];
+};
+
+#define MAX_LINK_DES 10
+
+struct sli4_rqst_cmn_get_reconfig_link_info {
+ struct sli4_rqst_hdr hdr;
+};
+
+struct sli4_rsp_cmn_get_reconfig_link_info {
+ struct sli4_rsp_hdr hdr;
+ u8 active_link_config_id;
+ u8 rsvd17;
+ u8 next_link_config_id;
+ u8 rsvd19;
+ __le32 link_configuration_descriptor_count;
+ struct sli4_link_config_descriptor
+ desc[MAX_LINK_DES];
+};
+
+enum sli4_set_reconfig_link_flags {
+ SLI4_SET_RECONFIG_LINKID_NEXT = 0xff,
+ SLI4_SET_RECONFIG_LINKID_FD = 1u << 31,
+};
+
+struct sli4_rqst_cmn_set_reconfig_link_id {
+ struct sli4_rqst_hdr hdr;
+ __le32 dw4_flags;
+};
+
+struct sli4_rsp_cmn_set_reconfig_link_id {
+ struct sli4_rsp_hdr hdr;
+};
+
+struct sli4_rqst_lowlevel_set_watchdog {
+ struct sli4_rqst_hdr hdr;
+ __le16 watchdog_timeout;
+ __le16 rsvd18;
+};
+
+struct sli4_rsp_lowlevel_set_watchdog {
+ struct sli4_rsp_hdr hdr;
+ __le32 rsvd;
+};
+
+/* FC opcode (OPC) values */
+enum sli4_fc_opcodes {
+ SLI4_OPC_WQ_CREATE = 0x1,
+ SLI4_OPC_WQ_DESTROY = 0x2,
+ SLI4_OPC_POST_SGL_PAGES = 0x3,
+ SLI4_OPC_RQ_CREATE = 0x5,
+ SLI4_OPC_RQ_DESTROY = 0x6,
+ SLI4_OPC_READ_FCF_TABLE = 0x8,
+ SLI4_OPC_POST_HDR_TEMPLATES = 0xb,
+ SLI4_OPC_REDISCOVER_FCF = 0x10,
+};
+
+/* Use the default CQ associated with the WQ */
+#define SLI4_CQ_DEFAULT 0xffff
+
+/*
+ * POST_SGL_PAGES
+ *
+ * Register the scatter gather list (SGL) memory and
+ * associate it with an XRI.
+ */
+struct sli4_rqst_post_sgl_pages {
+ struct sli4_rqst_hdr hdr;
+ __le16 xri_start;
+ __le16 xri_count;
+ struct {
+ __le32 page0_low;
+ __le32 page0_high;
+ __le32 page1_low;
+ __le32 page1_high;
+ } page_set[10];
+};
+
+struct sli4_rsp_post_sgl_pages {
+ struct sli4_rsp_hdr hdr;
+};
+
+struct sli4_rqst_post_hdr_templates {
+ struct sli4_rqst_hdr hdr;
+ __le16 rpi_offset;
+ __le16 page_count;
+ struct sli4_dmaaddr page_descriptor[];
+};
+
+#define SLI4_HDR_TEMPLATE_SIZE 64
+
+enum sli4_io_flags {
+/* The XRI associated with this IO is already active */
+ SLI4_IO_CONTINUATION = 1 << 0,
+/* Automatically generate a good RSP frame */
+ SLI4_IO_AUTO_GOOD_RESPONSE = 1 << 1,
+ SLI4_IO_NO_ABORT = 1 << 2,
+/* Set the DNRX bit because no auto xfer rdy buffer is posted */
+ SLI4_IO_DNRX = 1 << 3,
+};
+
+enum sli4_callback {
+ SLI4_CB_LINK,
+ SLI4_CB_MAX,
+};
+
+enum sli4_link_status {
+ SLI4_LINK_STATUS_UP,
+ SLI4_LINK_STATUS_DOWN,
+ SLI4_LINK_STATUS_NO_ALPA,
+ SLI4_LINK_STATUS_MAX,
+};
+
+enum sli4_link_topology {
+ SLI4_LINK_TOPO_NON_FC_AL = 1,
+ SLI4_LINK_TOPO_FC_AL,
+ SLI4_LINK_TOPO_LOOPBACK_INTERNAL,
+ SLI4_LINK_TOPO_LOOPBACK_EXTERNAL,
+ SLI4_LINK_TOPO_NONE,
+ SLI4_LINK_TOPO_MAX,
+};
+
+enum sli4_link_medium {
+ SLI4_LINK_MEDIUM_ETHERNET,
+ SLI4_LINK_MEDIUM_FC,
+ SLI4_LINK_MEDIUM_MAX,
+};
+/* Driver specific structures */
+
+struct sli4_queue {
+ /* Common to all queue types */
+ struct efc_dma dma;
+ spinlock_t lock; /* Lock to protect the doorbell register
+ * writes and queue reads
+ */
+ u32 index; /* current host entry index */
+ u16 size; /* entry size */
+ u16 length; /* number of entries */
+ u16 n_posted; /* number of entries posted for CQ, EQ */
+ u16 id; /* Port assigned xQ_ID */
+ u8 type; /* queue type ie EQ, CQ, ... */
+ void __iomem *db_regaddr; /* register address for the doorbell */
+ u16 phase; /* For if_type = 6, this value toggles
+ * for each iteration of the queue;
+ * a queue entry is valid when the cqe
+ * valid bit matches this value
+ */
+ u32 proc_limit; /* limit CQE processed per iteration */
+ u32 posted_limit; /* CQE/EQE process before ring db */
+ u32 max_num_processed;
+ u64 max_process_time;
+ union {
+ u32 r_idx; /* "read" index (MQ only) */
+ u32 flag;
+ } u;
+};
+
+/* Parameters used to populate WQE*/
+struct sli_bls_params {
+ u32 s_id;
+ u32 d_id;
+ u16 ox_id;
+ u16 rx_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u8 payload[12];
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_els_params {
+ u32 s_id;
+ u32 d_id;
+ u16 ox_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u32 xmit_len;
+ u32 rsp_len;
+ u8 timeout;
+ u8 cmd;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_ct_params {
+ u8 r_ctl;
+ u8 type;
+ u8 df_ctl;
+ u8 timeout;
+ u16 ox_id;
+ u32 d_id;
+ u32 rpi;
+ u32 vpi;
+ bool rpi_registered;
+ u32 xmit_len;
+ u32 rsp_len;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli_fcp_tgt_params {
+ u32 s_id;
+ u32 d_id;
+ u32 rpi;
+ u32 vpi;
+ u32 offset;
+ u16 ox_id;
+ u16 flags;
+ u8 cs_ctl;
+ u8 timeout;
+ u32 app_id;
+ u32 xmit_len;
+ u16 xri;
+ u16 tag;
+};
+
+struct sli4_link_event {
+ enum sli4_link_status status;
+ enum sli4_link_topology topology;
+ enum sli4_link_medium medium;
+ u32 speed;
+ u8 *loop_map;
+ u32 fc_id;
+};
+
+enum sli4_resource {
+ SLI4_RSRC_VFI,
+ SLI4_RSRC_VPI,
+ SLI4_RSRC_RPI,
+ SLI4_RSRC_XRI,
+ SLI4_RSRC_FCFI,
+ SLI4_RSRC_MAX,
+};
+
+struct sli4_extent {
+ u32 number;
+ u32 size;
+ u32 n_alloc;
+ u32 *base;
+ unsigned long *use_map;
+ u32 map_size;
+};
+
+struct sli4_queue_info {
+ u16 max_qcount[SLI4_QTYPE_MAX];
+ u32 max_qentries[SLI4_QTYPE_MAX];
+ u16 count_mask[SLI4_QTYPE_MAX];
+ u16 count_method[SLI4_QTYPE_MAX];
+ u32 qpage_count[SLI4_QTYPE_MAX];
+};
+
+struct sli4_params {
+ u8 has_extents;
+ u8 auto_reg;
+ u8 auto_xfer_rdy;
+ u8 hdr_template_req;
+ u8 perf_hint;
+ u8 perf_wq_id_association;
+ u8 cq_create_version;
+ u8 mq_create_version;
+ u8 high_login_mode;
+ u8 sgl_pre_registered;
+ u8 sgl_pre_reg_required;
+ u8 t10_dif_inline_capable;
+ u8 t10_dif_separate_capable;
+};
+
+struct sli4 {
+ void *os;
+ struct pci_dev *pci;
+ void __iomem *reg[PCI_STD_NUM_BARS];
+
+ u32 sli_rev;
+ u32 sli_family;
+ u32 if_type;
+
+ u16 asic_type;
+ u16 asic_rev;
+
+ u16 e_d_tov;
+ u16 r_a_tov;
+ struct sli4_queue_info qinfo;
+ u16 link_module_type;
+ u8 rq_batch;
+ u8 port_number;
+ char port_name[2];
+ u16 rq_min_buf_size;
+ u32 rq_max_buf_size;
+ u8 topology;
+ u8 wwpn[8];
+ u8 wwnn[8];
+ u32 fw_rev[2];
+ u8 fw_name[2][16];
+ char ipl_name[16];
+ u32 hw_rev[3];
+ char modeldesc[64];
+ char bios_version_string[32];
+ u32 wqe_size;
+ u32 vpd_length;
+ /*
+ * Tracks the port resources using the extents metaphor. For
+ * devices that don't implement extents (i.e.
+ * has_extents == FALSE), the code models each resource as
+ * a single large extent.
+ */
+ struct sli4_extent ext[SLI4_RSRC_MAX];
+ u32 features;
+ struct sli4_params params;
+ u32 sge_supported_length;
+ u32 sgl_page_sizes;
+ u32 max_sgl_pages;
+
+ /*
+ * Callback functions
+ */
+ int (*link)(void *ctx, void *event);
+ void *link_arg;
+
+ struct efc_dma bmbx;
+
+ /* Save pointer to physical memory descriptor for non-embedded
+ * SLI_CONFIG commands for BMBX dumping purposes
+ */
+ struct efc_dma *bmbx_non_emb_pmd;
+
+ struct efc_dma vpd_data;
+};
+
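+/*
+ * Fill the common SLI-4 request header (opcode, subsystem, command
+ * version and request payload length) for a mailbox/SLI_CONFIG command.
+ */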
+static inline void
+sli_cmd_fill_hdr(struct sli4_rqst_hdr *hdr, u8 opc, u8 sub, u32 ver, __le32 len)
+{
+ hdr->opcode = opc;
+ hdr->subsystem = sub;
+ hdr->dw3_version = cpu_to_le32(ver);
+ hdr->request_length = len;
+}
+
+/*
+ * Get/set parameter functions
+ */
+
+static inline u32
+sli_get_max_sge(struct sli4 *sli4)
+{
+ return sli4->sge_supported_length;
+}
+
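+/*
+ * Maximum number of SGEs per SGL, computed from the SGL page count;
+ * returns 0 if the reported SGL page size support is unexpected.
+ */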
+static inline u32
+sli_get_max_sgl(struct sli4 *sli4)
+{
+ if (sli4->sgl_page_sizes != 1) {
+ efc_log_err(sli4, "unsupported SGL page sizes %#x\n",
+ sli4->sgl_page_sizes);
+ return 0;
+ }
+
+ return (sli4->max_sgl_pages * SLI_PAGE_SIZE) / sizeof(struct sli4_sge);
+}
+
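+/*
+ * Map the configured topology to a link medium; anything other than an
+ * FC topology reports SLI4_LINK_MEDIUM_MAX.
+ */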
+static inline enum sli4_link_medium
+sli_get_medium(struct sli4 *sli4)
+{
+ switch (sli4->topology) {
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ return SLI4_LINK_MEDIUM_FC;
+ default:
+ return SLI4_LINK_MEDIUM_MAX;
+ }
+}
+
+static inline u32
+sli_get_lmt(struct sli4 *sli4)
+{
+ return sli4->link_module_type;
+}
+
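+/*
+ * Select the requested link topology; only the FC READ_CONFIG topology
+ * values are accepted.
+ */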
+static inline int
+sli_set_topology(struct sli4 *sli4, u32 value)
+{
+ int rc = 0;
+
+ switch (value) {
+ case SLI4_READ_CFG_TOPO_FC:
+ case SLI4_READ_CFG_TOPO_FC_AL:
+ case SLI4_READ_CFG_TOPO_NON_FC_AL:
+ sli4->topology = value;
+ break;
+ default:
+ efc_log_err(sli4, "unsupported topology %#x\n", value);
+ rc = -1;
+ }
+
+ return rc;
+}
+
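+/*
+ * Convert a count mask reported by COMMON_GET_SLI4_PARAMETERS into a
+ * maximum queue entry count: method 0 reports the count directly in
+ * the mask, while a non-zero method encodes it as 16 times the highest
+ * power of two set in the mask.
+ */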
+static inline u32
+sli_convert_mask_to_count(u32 method, u32 mask)
+{
+ u32 count = 0;
+
+ if (method) {
+ count = 1 << (31 - __builtin_clz(mask));
+ count *= 16;
+ } else {
+ count = mask;
+ }
+
+ return count;
+}
+
+static inline u32
+sli_reg_read_status(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_STATUS_REGOFF);
+}
+
+static inline int
+sli_fw_error_status(struct sli4 *sli4)
+{
+ return (sli_reg_read_status(sli4) & SLI4_PORT_STATUS_ERR) ? 1 : 0;
+}
+
+static inline u32
+sli_reg_read_err1(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_ERROR1);
+}
+
+static inline u32
+sli_reg_read_err2(struct sli4 *sli)
+{
+ return readl(sli->reg[0] + SLI4_PORT_ERROR2);
+}
+
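+/*
+ * Return the header and data placement lengths from an async receive
+ * CQE; fails with -1 unless the RQ status reports success.
+ */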
+static inline int
+sli_fc_rqe_length(struct sli4 *sli4, void *cqe, u32 *len_hdr,
+ u32 *len_data)
+{
+ struct sli4_fc_async_rcqe *rcqe = cqe;
+
+ *len_hdr = *len_data = 0;
+
+ if (rcqe->status == SLI4_FC_ASYNC_RQ_SUCCESS) {
+ *len_hdr = rcqe->hdpl_byte & SLI4_RACQE_HDPL;
+ *len_data = le16_to_cpu(rcqe->data_placement_length);
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
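+/*
+ * Extract the FCF index from a receive CQE; returns U8_MAX if the CQE
+ * code is not a recognized RQ completion type.
+ */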
+static inline u8
+sli_fc_rqe_fcfi(struct sli4 *sli4, void *cqe)
+{
+ u8 code = ((u8 *)cqe)[SLI4_CQE_CODE_OFFSET];
+ u8 fcfi = U8_MAX;
+
+ switch (code) {
+ case SLI4_CQE_CODE_RQ_ASYNC: {
+ struct sli4_fc_async_rcqe *rcqe = cqe;
+
+ fcfi = le16_to_cpu(rcqe->fcfi_rq_id_word) & SLI4_RACQE_FCFI;
+ break;
+ }
+ case SLI4_CQE_CODE_RQ_ASYNC_V1: {
+ struct sli4_fc_async_rcqe_v1 *rcqev1 = cqe;
+
+ fcfi = rcqev1->fcfi_byte & SLI4_RACQE_FCFI;
+ break;
+ }
+ case SLI4_CQE_CODE_OPTIMIZED_WRITE_CMD: {
+ struct sli4_fc_optimized_write_cmd_cqe *opt_wr = cqe;
+
+ fcfi = opt_wr->flags0 & SLI4_OCQE_FCFI;
+ break;
+ }
+ }
+
+ return fcfi;
+}
+
+/****************************************************************************
+ * Function prototypes
+ */
+int
+sli_cmd_config_link(struct sli4 *sli4, void *buf);
+int
+sli_cmd_down_link(struct sli4 *sli4, void *buf);
+int
+sli_cmd_dump_type4(struct sli4 *sli4, void *buf, u16 wki);
+int
+sli_cmd_common_read_transceiver_data(struct sli4 *sli4, void *buf,
+ u32 page_num, struct efc_dma *dma);
+int
+sli_cmd_read_link_stats(struct sli4 *sli4, void *buf, u8 req_stats,
+ u8 clear_overflow_flags, u8 clear_all_counters);
+int
+sli_cmd_read_status(struct sli4 *sli4, void *buf, u8 clear);
+int
+sli_cmd_init_link(struct sli4 *sli4, void *buf, u32 speed,
+ u8 reset_alpa);
+int
+sli_cmd_init_vfi(struct sli4 *sli4, void *buf, u16 vfi, u16 fcfi,
+ u16 vpi);
+int
+sli_cmd_init_vpi(struct sli4 *sli4, void *buf, u16 vpi, u16 vfi);
+int
+sli_cmd_post_xri(struct sli4 *sli4, void *buf, u16 base, u16 cnt);
+int
+sli_cmd_release_xri(struct sli4 *sli4, void *buf, u8 num_xri);
+int
+sli_cmd_read_sparm64(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma, u16 vpi);
+int
+sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma);
+int
+sli_cmd_read_nvparms(struct sli4 *sli4, void *buf);
+int
+sli_cmd_write_nvparms(struct sli4 *sli4, void *buf, u8 *wwpn,
+ u8 *wwnn, u8 hard_alpa, u32 preferred_d_id);
+int
+sli_cmd_reg_fcfi(struct sli4 *sli4, void *buf, u16 index,
+ struct sli4_cmd_rq_cfg *rq_cfg);
+int
+sli_cmd_reg_fcfi_mrq(struct sli4 *sli4, void *buf, u8 mode, u16 index,
+ u8 rq_selection_policy, u8 mrq_bit_mask, u16 num_mrqs,
+ struct sli4_cmd_rq_cfg *rq_cfg);
+int
+sli_cmd_reg_rpi(struct sli4 *sli4, void *buf, u32 rpi, u32 vpi, u32 fc_id,
+ struct efc_dma *dma, u8 update, u8 enable_t10_pi);
+int
+sli_cmd_unreg_fcfi(struct sli4 *sli4, void *buf, u16 indicator);
+int
+sli_cmd_unreg_rpi(struct sli4 *sli4, void *buf, u16 indicator,
+ enum sli4_resource which, u32 fc_id);
+int
+sli_cmd_reg_vpi(struct sli4 *sli4, void *buf, u32 fc_id,
+ __be64 sli_wwpn, u16 vpi, u16 vfi, bool update);
+int
+sli_cmd_reg_vfi(struct sli4 *sli4, void *buf, size_t size,
+ u16 vfi, u16 fcfi, struct efc_dma dma,
+ u16 vpi, __be64 sli_wwpn, u32 fc_id);
+int
+sli_cmd_unreg_vpi(struct sli4 *sli4, void *buf, u16 id, u32 type);
+int
+sli_cmd_unreg_vfi(struct sli4 *sli4, void *buf, u16 idx, u32 type);
+int
+sli_cmd_common_nop(struct sli4 *sli4, void *buf, u64 context);
+int
+sli_cmd_common_get_resource_extent_info(struct sli4 *sli4, void *buf,
+ u16 rtype);
+int
+sli_cmd_common_get_sli4_parameters(struct sli4 *sli4, void *buf);
+int
+sli_cmd_common_write_object(struct sli4 *sli4, void *buf, u16 noc,
+ u16 eof, u32 len, u32 offset, char *name, struct efc_dma *dma);
+int
+sli_cmd_common_delete_object(struct sli4 *sli4, void *buf, char *object_name);
+int
+sli_cmd_common_read_object(struct sli4 *sli4, void *buf,
+ u32 length, u32 offset, char *name, struct efc_dma *dma);
+int
+sli_cmd_dmtf_exec_clp_cmd(struct sli4 *sli4, void *buf,
+ struct efc_dma *cmd, struct efc_dma *resp);
+int
+sli_cmd_common_set_dump_location(struct sli4 *sli4, void *buf,
+ bool query, bool is_buffer_list, struct efc_dma *dma, u8 fdb);
+int
+sli_cmd_common_set_features(struct sli4 *sli4, void *buf,
+ u32 feature, u32 param_len, void *parameter);
+
+int sli_cqe_mq(struct sli4 *sli4, void *buf);
+int sli_cqe_async(struct sli4 *sli4, void *buf);
+
+int
+sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev, void __iomem *r[]);
+void sli_calc_max_qentries(struct sli4 *sli4);
+int sli_init(struct sli4 *sli4);
+int sli_reset(struct sli4 *sli4);
+int sli_fw_reset(struct sli4 *sli4);
+void sli_teardown(struct sli4 *sli4);
+int
+sli_callback(struct sli4 *sli4, enum sli4_callback cb, void *func, void *arg);
+int
+sli_bmbx_command(struct sli4 *sli4);
+int
+__sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
+ size_t size, u32 n_entries, u32 align);
+int
+__sli_create_queue(struct sli4 *sli4, struct sli4_queue *q);
+int
+sli_eq_modify_delay(struct sli4 *sli4, struct sli4_queue *eq, u32 num_eq,
+ u32 shift, u32 delay_mult);
+int
+sli_queue_alloc(struct sli4 *sli4, u32 qtype, struct sli4_queue *q,
+ u32 n_entries, struct sli4_queue *assoc);
+int
+sli_cq_alloc_set(struct sli4 *sli4, struct sli4_queue *qs[], u32 num_cqs,
+ u32 n_entries, struct sli4_queue *eqs[]);
+int
+sli_get_queue_entry_size(struct sli4 *sli4, u32 qtype);
+int
+sli_queue_free(struct sli4 *sli4, struct sli4_queue *q, u32 destroy_queues,
+ u32 free_memory);
+int
+sli_queue_eq_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
+int
+sli_queue_arm(struct sli4 *sli4, struct sli4_queue *q, bool arm);
+
+int
+sli_wq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_mq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_rq_write(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_eq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_cq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_mq_read(struct sli4 *sli4, struct sli4_queue *q, u8 *entry);
+int
+sli_resource_alloc(struct sli4 *sli4, enum sli4_resource rtype, u32 *rid,
+ u32 *index);
+int
+sli_resource_free(struct sli4 *sli4, enum sli4_resource rtype, u32 rid);
+int
+sli_resource_reset(struct sli4 *sli4, enum sli4_resource rtype);
+int
+sli_eq_parse(struct sli4 *sli4, u8 *buf, u16 *cq_id);
+int
+sli_cq_parse(struct sli4 *sli4, struct sli4_queue *cq, u8 *cqe,
+ enum sli4_qentry *etype, u16 *q_id);
+
+int sli_raise_ue(struct sli4 *sli4, u8 dump);
+int sli_dump_is_ready(struct sli4 *sli4);
+bool sli_reset_required(struct sli4 *sli4);
+bool sli_fw_ready(struct sli4 *sli4);
+
+int
+sli_fc_process_link_attention(struct sli4 *sli4, void *acqe);
+int
+sli_fc_cqe_parse(struct sli4 *sli4, struct sli4_queue *cq,
+ u8 *cqe, enum sli4_qentry *etype,
+ u16 *rid);
+u32 sli_fc_response_length(struct sli4 *sli4, u8 *cqe);
+u32 sli_fc_io_length(struct sli4 *sli4, u8 *cqe);
+int sli_fc_els_did(struct sli4 *sli4, u8 *cqe, u32 *d_id);
+u32 sli_fc_ext_status(struct sli4 *sli4, u8 *cqe);
+int
+sli_fc_rqe_rqid_and_index(struct sli4 *sli4, u8 *cqe, u16 *rq_id, u32 *index);
+int
+sli_cmd_wq_create(struct sli4 *sli4, void *buf,
+ struct efc_dma *qmem, u16 cq_id);
+int sli_cmd_post_sgl_pages(struct sli4 *sli4, void *buf, u16 xri,
+ u32 xri_count, struct efc_dma *page0[], struct efc_dma *page1[],
+ struct efc_dma *dma);
+int
+sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf,
+ struct efc_dma *dma, u16 rpi, struct efc_dma *payload_dma);
+int
+sli_fc_rq_alloc(struct sli4 *sli4, struct sli4_queue *q, u32 n_entries,
+ u32 buffer_size, struct sli4_queue *cq, bool is_hdr);
+int
+sli_fc_rq_set_alloc(struct sli4 *sli4, u32 num_rq_pairs, struct sli4_queue *q[],
+ u32 base_cq_id, u32 num, u32 hdr_buf_size, u32 data_buf_size);
+u32 sli_fc_get_rpi_requirements(struct sli4 *sli4, u32 n_rpi);
+int
+sli_abort_wqe(struct sli4 *sli4, void *buf, enum sli4_abort_type type,
+ bool send_abts, u32 ids, u32 mask, u16 tag, u16 cq_id);
+
+int
+sli_send_frame_wqe(struct sli4 *sli4, void *buf, u8 sof, u8 eof,
+ u32 *hdr, struct efc_dma *payload, u32 req_len, u8 timeout,
+ u16 xri, u16 req_tag);
+
+int
+sli_xmit_els_rsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *rsp,
+ struct sli_els_params *params);
+
+int
+sli_els_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_els_params *params);
+
+int
+sli_fcp_icmnd64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 timeout);
+
+int
+sli_fcp_iread64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len, u16 xri,
+ u16 tag, u16 cq_id, u32 rpi, u32 rnode_fcid, u8 dif, u8 bs,
+ u8 timeout);
+
+int
+sli_fcp_iwrite64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u32 xfer_len,
+ u32 first_burst, u16 xri, u16 tag, u16 cq_id, u32 rpi,
+ u32 rnode_fcid, u8 dif, u8 bs, u8 timeout);
+
+int
+sli_fcp_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params);
+int
+sli_fcp_cont_treceive64_wqe(struct sli4 *sli, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 sec_xri, u16 cq_id, u8 dif,
+ u8 bs, struct sli_fcp_tgt_params *params);
+
+int
+sli_fcp_trsp64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u16 cq_id, u8 port_owned, struct sli_fcp_tgt_params *params);
+
+int
+sli_fcp_tsend64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ u32 first_data_sge, u16 cq_id, u8 dif, u8 bs,
+ struct sli_fcp_tgt_params *params);
+int
+sli_gen_request64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *sgl,
+ struct sli_ct_params *params);
+
+int
+sli_xmit_bls_rsp64_wqe(struct sli4 *sli4, void *buf,
+ struct sli_bls_payload *payload, struct sli_bls_params *params);
+
+int
+sli_xmit_sequence64_wqe(struct sli4 *sli4, void *buf, struct efc_dma *payload,
+ struct sli_ct_params *params);
+
+int
+sli_requeue_xri_wqe(struct sli4 *sli4, void *buf, u16 xri, u16 tag, u16 cq_id);
+void
+sli4_cmd_lowlevel_set_watchdog(struct sli4 *sli4, void *buf, size_t size,
+ u16 timeout);
+
+const char *sli_fc_get_status_string(u32 status);
+
+#endif /* !_SLI4_H */
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
index 4aca3d52c851..ff2ad9b38575 100644
--- a/drivers/scsi/esas2r/atioctl.h
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -1141,7 +1141,7 @@ struct __packed atto_ioctl_vda_gsv_cmd {
u8 rsp_len;
u8 reserved[7];
- u8 version_info[1];
+ u8 version_info[];
#define ATTO_VDA_VER_UNSUPPORTED 0xFF
};
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 45ec9f16c085..647f82898b6e 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -1525,7 +1525,7 @@ void esas2r_complete_request_cb(struct esas2r_adapter *a,
rq->cmd->result =
((esas2r_req_status_to_error(rq->req_stat) << 16)
- | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+ | rq->func_rsp.scsi_rsp.scsi_stat);
if (rq->req_stat == RS_UNDERRUN)
scsi_set_resid(rq->cmd,
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 342535ac0570..9a8c037a2f21 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -922,9 +922,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
* saw originally. Also, report that we are providing
* the sense data.
*/
- cmd->result = ((DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (SAM_STAT_CHECK_CONDITION << 0));
+ cmd->result = SAM_STAT_CHECK_CONDITION;
ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
if (esp_debug & ESP_DEBUG_AUTOSENSE) {
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 89ec735929c3..5ae6c207d3ac 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -293,7 +293,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct netdev_hw_addr *ha;
struct net_device *real_dev;
- u8 flogi_maddr[ETH_ALEN];
+ static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
const struct net_device_ops *ops;
fcoe->netdev = netdev;
@@ -336,7 +336,6 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
* or enter promiscuous mode if not capable of listening
* for multiple unicast MACs.
*/
- memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_uc_add(netdev, flogi_maddr);
if (fip->spma)
dev_uc_add(netdev, fip->ctl_src_addr);
@@ -442,7 +441,7 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
- u8 flogi_maddr[ETH_ALEN];
+ static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
const struct net_device_ops *ops;
/*
@@ -458,7 +457,6 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
synchronize_net();
/* Delete secondary MAC addresses */
- memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
dev_uc_del(netdev, flogi_maddr);
if (fip->spma)
dev_uc_del(netdev, fip->ctl_src_addr);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 772bdc93930a..eda2be534aa7 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -202,11 +202,10 @@ static int fdomain_select(struct Scsi_Host *sh, int target)
return 1;
}
-static void fdomain_finish_cmd(struct fdomain *fd, int result)
+static void fdomain_finish_cmd(struct fdomain *fd)
{
outb(0, fd->base + REG_ICTL);
fdomain_make_bus_idle(fd);
- fd->cur_cmd->result = result;
fd->cur_cmd->scsi_done(fd->cur_cmd);
fd->cur_cmd = NULL;
}
@@ -273,7 +272,8 @@ static void fdomain_work(struct work_struct *work)
if (cmd->SCp.phase & in_arbitration) {
status = inb(fd->base + REG_ASTAT);
if (!(status & ASTAT_ARB)) {
- fdomain_finish_cmd(fd, DID_BUS_BUSY << 16);
+ set_host_byte(cmd, DID_BUS_BUSY);
+ fdomain_finish_cmd(fd);
goto out;
}
cmd->SCp.phase = in_selection;
@@ -290,7 +290,8 @@ static void fdomain_work(struct work_struct *work)
if (!(status & BSTAT_BSY)) {
/* Try again, for slow devices */
if (fdomain_select(cmd->device->host, scmd_id(cmd))) {
- fdomain_finish_cmd(fd, DID_NO_CONNECT << 16);
+ set_host_byte(cmd, DID_NO_CONNECT);
+ fdomain_finish_cmd(fd);
goto out;
}
/* Stop arbitration and enable parity */
@@ -333,7 +334,7 @@ static void fdomain_work(struct work_struct *work)
break;
case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */
cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
- if (!cmd->SCp.Message)
+ if (cmd->SCp.Message == COMMAND_COMPLETE)
++done;
break;
}
@@ -359,9 +360,10 @@ static void fdomain_work(struct work_struct *work)
fdomain_read_data(cmd);
if (done) {
- fdomain_finish_cmd(fd, (cmd->SCp.Status & 0xff) |
- ((cmd->SCp.Message & 0xff) << 8) |
- (DID_OK << 16));
+ set_status_byte(cmd, cmd->SCp.Status);
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ fdomain_finish_cmd(fd);
} else {
if (cmd->SCp.phase & disconnect) {
outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
@@ -439,10 +441,10 @@ static int fdomain_abort(struct scsi_cmnd *cmd)
fdomain_make_bus_idle(fd);
fd->cur_cmd->SCp.phase |= aborted;
- fd->cur_cmd->result = DID_ABORT << 16;
/* Aborts are not done well. . . */
- fdomain_finish_cmd(fd, DID_ABORT << 16);
+ set_host_byte(fd->cur_cmd, DID_ABORT);
+ fdomain_finish_cmd(fd);
spin_unlock_irqrestore(sh->host_lock, flags);
return SUCCESS;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index cf879cc59e4c..436d174f2194 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -8,6 +8,7 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
+#include <linux/async.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
@@ -37,6 +38,7 @@
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
#define HISI_SAS_PM_BIT 2
+#define HISI_SAS_HW_FAULT_BIT 3
#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
#define HISI_SAS_RESERVED_IPTT 96
#define HISI_SAS_UNRESERVED_IPTT \
@@ -90,8 +92,8 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
-#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
-#define CLEAR_ITCT_TIMEOUT 20
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT (20 * HZ)
+#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
struct hisi_hba;
@@ -185,6 +187,7 @@ struct hisi_sas_phy {
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
int enable;
+ int wait_phyup_cnt;
atomic_t down_cnt;
/* Trace FIFO */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5a204074099c..3a903e8e0384 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -15,7 +15,7 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
- int abort_flag, int tag);
+ int abort_flag, int tag, bool rst_to_recover);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
@@ -857,6 +857,7 @@ static void hisi_sas_phyup_work(struct work_struct *work)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
+ phy->wait_phyup_cnt = 0;
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
@@ -899,6 +900,8 @@ static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}
+#define HISI_SAS_WAIT_PHYUP_RETRIES 10
+
void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
@@ -909,8 +912,16 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
return;
if (!timer_pending(&phy->timer)) {
- phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
- add_timer(&phy->timer);
+ if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
+ phy->wait_phyup_cnt++;
+ phy->timer.expires = jiffies +
+ HISI_SAS_WAIT_PHYUP_TIMEOUT;
+ add_timer(&phy->timer);
+ } else {
+ dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
+ phy_no, phy->wait_phyup_cnt);
+ phy->wait_phyup_cnt = 0;
+ }
}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
@@ -1063,7 +1074,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
down(&hisi_hba->sem);
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, true);
hisi_sas_dereg_device(hisi_hba, device);
@@ -1182,9 +1193,9 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
complete(&task->slow_task->completion);
}
-#define TASK_TIMEOUT 20
-#define TASK_RETRY 3
-#define INTERNAL_ABORT_TIMEOUT 6
+#define TASK_TIMEOUT (20 * HZ)
+#define TASK_RETRY 3
+#define INTERNAL_ABORT_TIMEOUT (6 * HZ)
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
void *parameter, u32 para_len,
struct hisi_sas_tmf_task *tmf)
@@ -1212,7 +1223,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
@@ -1505,7 +1516,8 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
continue;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0,
+ false);
if (rc < 0)
dev_err(dev, "STP reject: abort dev failed %d\n", rc);
}
@@ -1604,6 +1616,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
}
hisi_sas_controller_reset_done(hisi_hba);
+ clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
dev_info(dev, "controller reset complete\n");
return 0;
@@ -1660,7 +1673,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
&tmf_task);
rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ HISI_SAS_INT_ABT_CMD, tag,
+ false);
if (rc2 < 0) {
dev_err(dev, "abort task: internal abort (%d)\n", rc2);
return TMF_RESP_FUNC_FAILED;
@@ -1682,7 +1696,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (task->dev->dev_type == SAS_SATA_DEV) {
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV,
- 0);
+ 0, false);
if (rc < 0) {
dev_err(dev, "abort task: internal abort failed\n");
goto out;
@@ -1697,7 +1711,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag);
+ HISI_SAS_INT_ABT_CMD, tag,
+ false);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
@@ -1723,7 +1738,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
int rc;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, false);
if (rc < 0) {
dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
return TMF_RESP_FUNC_FAILED;
@@ -1750,6 +1765,8 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
return rc;
}
+#define I_T_NEXUS_RESET_PHYUP_TIMEOUT (2 * HZ)
+
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
struct sas_phy *local_phy = sas_get_local_phy(device);
@@ -1784,7 +1801,8 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
sas_ha->sas_phy[local_phy->number];
struct hisi_sas_phy *phy =
container_of(sas_phy, struct hisi_sas_phy, sas_phy);
- int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
+ int ret = wait_for_completion_timeout(&phyreset,
+ I_T_NEXUS_RESET_PHYUP_TIMEOUT);
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
@@ -1814,7 +1832,7 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
int rc;
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, false);
if (rc < 0) {
dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
return TMF_RESP_FUNC_FAILED;
@@ -1844,7 +1862,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then lu reset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ HISI_SAS_INT_ABT_DEV, 0, false);
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
@@ -1875,12 +1893,24 @@ out:
return rc;
}
+static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
+{
+ struct domain_device *device = data;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ int rc;
+
+ rc = hisi_sas_debug_I_T_nexus_reset(device);
+ if (rc != TMF_RESP_FUNC_COMPLETE)
+ dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
+ SAS_ADDR(device->sas_addr), rc);
+}
+
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
- struct device *dev = hisi_hba->dev;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- int rc, i;
+ ASYNC_DOMAIN_EXCLUSIVE(async);
+ int i;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
@@ -1895,12 +1925,11 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
dev_is_expander(device->dev_type))
continue;
- rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc != TMF_RESP_FUNC_COMPLETE)
- dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
- sas_dev->device_id, rc);
+ async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
+ device, &async);
}
+ async_synchronize_full_domain(&async);
hisi_sas_release_tasks(hisi_hba);
return TMF_RESP_FUNC_COMPLETE;
@@ -2029,11 +2058,13 @@ err_out:
* @tag: tag of IO to be aborted (only relevant to single
* IO mode)
* @dq: delivery queue for this internal abort command
+ * @rst_to_recover: If rst_to_recover is set, queue a controller
+ * reset if an internal abort times out.
*/
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device, int abort_flag,
- int tag, struct hisi_sas_dq *dq)
+ int tag, struct hisi_sas_dq *dq, bool rst_to_recover)
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -2049,6 +2080,9 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (!hisi_hba->hw->prep_abort)
return TMF_RESP_FUNC_FAILED;
+ if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
+ return -EIO;
+
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
@@ -2057,7 +2091,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
+ task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
@@ -2079,6 +2113,8 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
+ set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
+
if (slot) {
struct hisi_sas_cq *cq =
&hisi_hba->cq[slot->dlvry_queue];
@@ -2089,7 +2125,13 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
synchronize_irq(cq->irq_no);
slot->task = NULL;
}
- dev_err(dev, "internal task abort: timeout and not done.\n");
+
+ if (rst_to_recover) {
+ dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n");
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ } else {
+ dev_err(dev, "internal task abort: timeout and not done.\n");
+ }
res = -EIO;
goto exit;
@@ -2122,7 +2164,7 @@ exit:
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
- int abort_flag, int tag)
+ int abort_flag, int tag, bool rst_to_recover)
{
struct hisi_sas_slot *slot;
struct device *dev = hisi_hba->dev;
@@ -2134,7 +2176,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
slot = &hisi_hba->slot_info[tag];
dq = &hisi_hba->dq[slot->dlvry_queue];
return _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag, dq);
+ abort_flag, tag, dq,
+ rst_to_recover);
case HISI_SAS_INT_ABT_DEV:
for (i = 0; i < hisi_hba->cq_nvecs; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
@@ -2145,7 +2188,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
dq = &hisi_hba->dq[i];
rc = _hisi_sas_internal_task_abort(hisi_hba, device,
abort_flag, tag,
- dq);
+ dq, rst_to_recover);
if (rc)
return rc;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3e359ac752fd..9e58009369f9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1152,14 +1152,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
default:
{
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
}
}
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SATA:
@@ -1281,7 +1281,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
@@ -1298,7 +1298,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
break;
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
@@ -1649,7 +1649,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (irq < 0) {
dev_err(dev, "irq init: fail map phy interrupt %d\n",
idx);
- return -ENOENT;
+ return irq;
}
rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
@@ -1657,7 +1657,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
}
@@ -1668,7 +1668,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (irq < 0) {
dev_err(dev, "irq init: could not map cq interrupt %d\n",
idx);
- return -ENOENT;
+ return irq;
}
rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0,
@@ -1676,7 +1676,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
@@ -1686,7 +1686,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (irq < 0) {
dev_err(dev, "irq init: could not map fatal interrupt %d\n",
idx);
- return -ENOENT;
+ return irq;
}
rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
@@ -1694,7 +1694,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
irq, rc);
- return -ENOENT;
+ return rc;
}
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 46f60fc2a069..49d2723ef34c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -994,7 +994,7 @@ static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
if (!wait_for_completion_timeout(sas_dev->completion,
- CLEAR_ITCT_TIMEOUT * HZ)) {
+ HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
dev_warn(dev, "failed to clear ITCT\n");
return -ETIMEDOUT;
}
@@ -2168,7 +2168,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SATA:
@@ -2427,7 +2427,7 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
@@ -2441,12 +2441,12 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
hisi_sas_sata_done(task, slot);
break;
}
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e95408314078..5c3b1dfcb37c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -843,7 +843,7 @@ static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
if (!wait_for_completion_timeout(sas_dev->completion,
- CLEAR_ITCT_TIMEOUT * HZ)) {
+ HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
dev_warn(dev, "failed to clear ITCT\n");
return -ETIMEDOUT;
}
@@ -2178,7 +2178,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_sata_done(task, slot);
break;
case SAS_PROTOCOL_SMP:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
break;
@@ -2285,7 +2285,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
void *to = page_address(sg_page(sg_resp));
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
@@ -2298,11 +2298,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
hisi_sas_sata_done(task, slot);
break;
default:
- ts->stat = SAM_STAT_CHECK_CONDITION;
+ ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index cd52664920e1..929a3b043ad7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -220,6 +220,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
+ shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+ shost->can_queue);
+
error = scsi_init_sense_cache(shost);
if (error)
goto fail;
@@ -319,7 +322,7 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
- /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
if (shost->tmf_work_q)
@@ -657,10 +660,11 @@ EXPORT_SYMBOL_GPL(scsi_flush_work);
static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
- int status = *(int *)data;
+ enum scsi_host_status status = *(enum scsi_host_status *)data;
scsi_dma_unmap(scmd);
- scmd->result = status << 16;
+ scmd->result = 0;
+ set_host_byte(scmd, status);
scmd->scsi_done(scmd);
return true;
}
@@ -675,7 +679,8 @@ static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
* caller to ensure that concurrent I/O submission and/or
* completion is stopped when calling this function.
*/
-void scsi_host_complete_all_commands(struct Scsi_Host *shost, int status)
+void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+ enum scsi_host_status status)
{
blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
&status);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index db4c7a7ff4dd..61cda7b7624f 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -760,7 +760,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
goto skip_resid;
default:
- scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
+ scp->result = DID_ABORT << 16;
break;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 715c34904e3e..bee1bec49c09 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -655,8 +655,10 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
**/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
- if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
+ if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
+ tgt->init_retries = 0;
+ }
wake_up(&tgt->vhost->work_wait_q);
}
@@ -4300,9 +4302,10 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
switch (status) {
case IBMVFC_MAD_SUCCESS:
- tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
+ tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->scsi_id = tgt->new_scsi_id;
tgt->ids.port_id = tgt->scsi_id;
memcpy(&tgt->service_parms, &rsp->service_parms,
sizeof(tgt->service_parms));
@@ -4320,8 +4323,8 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
tgt_log(tgt, level,
- "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
- tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+ "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+ tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
status);
break;
}
@@ -4358,8 +4361,8 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
move->common.length = cpu_to_be16(sizeof(*move));
- move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
- move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
move->wwpn = cpu_to_be64(tgt->wwpn);
move->node_name = cpu_to_be64(tgt->ids.node_name);
@@ -4368,7 +4371,7 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
- tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
+ tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
}
/**
@@ -4728,20 +4731,25 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
* and it failed for some reason, such as there being I/O
* pending to the target. In this case, we will have already
* deleted the rport from the FC transport so we do a move
- * login, which works even with I/O pending, as it will cancel
- * any active commands.
+ * login, which works even with I/O pending. However, if
+ * there is still I/O pending, it will stay outstanding, so
+ * we only do this if fast fail is disabled for the rport;
+ * otherwise we let terminate_rport_io clean up the port
+ * before we log in at the new location.
*/
if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
- /*
- * Do a move login here. The old target is no longer
- * known to the transport layer We don't use the
- * normal ibmvfc_set_tgt_action to set this, as we
- * don't normally want to allow this state change.
- */
- wtgt->old_scsi_id = wtgt->scsi_id;
- wtgt->scsi_id = scsi_id;
- wtgt->action = IBMVFC_TGT_ACTION_INIT;
- ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+ if (wtgt->move_login) {
+ /*
+ * Do a move login here. The old target is no longer
+ * known to the transport layer. We don't use the
+ * normal ibmvfc_set_tgt_action to set this, as we
+ * don't normally want to allow this state change.
+ */
+ wtgt->new_scsi_id = scsi_id;
+ wtgt->action = IBMVFC_TGT_ACTION_INIT;
+ wtgt->init_retries = 0;
+ ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+ }
goto unlock_out;
} else {
tgt_err(wtgt, "Unexpected target state: %d, %p\n",
@@ -5332,6 +5340,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
tgt->rport = NULL;
+ tgt->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
return;
@@ -5486,7 +5495,20 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
rport = tgt->rport;
tgt->rport = NULL;
+ tgt->init_retries = 0;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+
+ /*
+ * If fast fail is enabled, we wait for it to fire and then clean up
+ * the old port, since we expect the fast fail timer to clean up the
+ * outstanding I/O faster than waiting for normal command timeouts.
+ * However, if fast fail is disabled, any I/O outstanding to the
+ * rport LUNs will stay outstanding indefinitely, since the EH handlers
+ * won't get invoked for I/Os timing out. If this is an NPIV failover
+ * scenario, the better alternative is to use the move login.
+ */
+ if (rport && rport->fast_io_fail_tmo == -1)
+ tgt->move_login = 1;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 19dcec3ae9ba..4f0f3baefae4 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -718,7 +718,7 @@ struct ibmvfc_target {
struct ibmvfc_host *vhost;
u64 scsi_id;
u64 wwpn;
- u64 old_scsi_id;
+ u64 new_scsi_id;
struct fc_rport *rport;
int target_id;
enum ibmvfc_target_action action;
@@ -726,6 +726,7 @@ struct ibmvfc_target {
int add_rport;
int init_retries;
int logo_rcvd;
+ int move_login;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e75b0068ad84..e6a3eaaa57d9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1005,7 +1005,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
if (cmnd) {
cmnd->result |= rsp->status;
- if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
+ if (scsi_status_is_check_condition(cmnd->result))
memcpy(cmnd->sense_buffer,
rsp->data,
be32_to_cpu(rsp->sense_data_len));
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 862d35a098cf..943c9102a7eb 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1283,19 +1283,6 @@ static struct parport_driver imm_driver = {
.detach = imm_detach,
.devmodel = true,
};
-
-static int __init imm_driver_init(void)
-{
- printk("imm: Version %s\n", IMM_VERSION);
- return parport_register_driver(&imm_driver);
-}
-
-static void __exit imm_driver_exit(void)
-{
- parport_unregister_driver(&imm_driver);
-}
-
-module_init(imm_driver_init);
-module_exit(imm_driver_exit);
+module_parport_driver(imm_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bc33d54a4011..8b33c9871484 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3344,13 +3344,15 @@ ips_map_status(ips_ha_t * ha, ips_scb_t * scb, ips_stat_t * sp)
IPS_CMD_EXTENDED_DCDB_SG)) {
tapeDCDB =
(IPS_DCDB_TABLE_TAPE *) & scb->dcdb;
- memcpy(scb->scsi_cmd->sense_buffer,
+ memcpy_and_pad(scb->scsi_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
tapeDCDB->sense_info,
- SCSI_SENSE_BUFFERSIZE);
+ sizeof(tapeDCDB->sense_info), 0);
} else {
- memcpy(scb->scsi_cmd->sense_buffer,
+ memcpy_and_pad(scb->scsi_cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE,
scb->dcdb.sense_info,
- SCSI_SENSE_BUFFERSIZE);
+ sizeof(scb->dcdb.sense_info), 0);
}
device_error = 2; /* check condition */
}
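/*
 * Editor's sketch, not part of the patch: memcpy_and_pad() from
 * <linux/string.h> behaves roughly as below. Since the DCDB sense buffers
 * are smaller than SCSI_SENSE_BUFFERSIZE, the remainder of the midlayer
 * sense buffer is now zero-filled instead of memcpy() reading past the end
 * of the source structure.
 */
static inline void memcpy_and_pad_sketch(void *dest, size_t dest_len,
					 const void *src, size_t count, int pad)
{
	if (dest_len > count) {
		memcpy(dest, src, count);
		memset(dest + count, pad, dest_len - count);
	} else {
		memcpy(dest, src, dest_len);
	}
}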
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index e7c6cb4c1556..e1ff79464131 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2566,7 +2566,7 @@ static void isci_request_handle_controller_specific_errors(
if (!idev)
*status_ptr = SAS_DEVICE_UNKNOWN;
else
- *status_ptr = SAM_STAT_TASK_ABORTED;
+ *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
}
@@ -2696,7 +2696,7 @@ static void isci_request_handle_controller_specific_errors(
default:
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
- *status_ptr = SAM_STAT_TASK_ABORTED;
+ *status_ptr = SAS_SAM_STAT_TASK_ABORTED;
if (task->task_proto == SAS_PROTOCOL_SMP)
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
@@ -2719,7 +2719,7 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
if (ac_err_mask(fis->status))
ts->stat = SAS_PROTO_RESPONSE;
else
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
ts->resp = SAS_TASK_COMPLETE;
}
@@ -2782,7 +2782,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
case SCI_IO_SUCCESS_IO_DONE_EARLY:
response = SAS_TASK_COMPLETE;
- status = SAM_STAT_GOOD;
+ status = SAS_SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
@@ -2852,7 +2852,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
/* Fail the I/O. */
response = SAS_TASK_UNDELIVERED;
- status = SAM_STAT_TASK_ABORTED;
+ status = SAS_SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
break;
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 62062ed6cd9a..3fd88d72a0c0 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -160,7 +160,7 @@ int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
isci_task_refuse(ihost, task,
SAS_TASK_UNDELIVERED,
- SAM_STAT_TASK_ABORTED);
+ SAS_SAM_STAT_TASK_ABORTED);
} else {
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -709,8 +709,8 @@ isci_task_request_complete(struct isci_host *ihost,
tmf->status = completion_status;
if (tmf->proto == SAS_PROTOCOL_SSP) {
- memcpy(&tmf->resp.resp_iu,
- &ireq->ssp.rsp,
+ memcpy(tmf->resp.rsp_buf,
+ ireq->ssp.rsp_buf,
SSP_RESP_IU_MAX_SIZE);
} else if (tmf->proto == SAS_PROTOCOL_SATA) {
memcpy(&tmf->resp.d2h_fis,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index dd33ce0e3737..1bc37593c88f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -600,6 +600,12 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
if (!sock)
return;
+ /*
+ * Make sure we start socket shutdown now in case userspace is up
+ * but delayed in releasing the socket.
+ */
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+
sock_hold(sock->sk);
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
@@ -689,6 +695,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk->sk_allocation = GFP_ATOMIC;
sk_set_memalloc(sk);
+ sock_no_linger(sk);
iscsi_sw_tcp_conn_set_callbacks(conn);
tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
index 602c97a651bc..74ae7fd15d8d 100644
--- a/drivers/scsi/libfc/fc_encode.h
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -166,9 +166,11 @@ static inline int fc_ct_ns_fill(struct fc_lport *lport,
static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,
const char *in, size_t len)
{
- int copied = strscpy(entry->value, in, len);
- if (copied > 0)
- memset(entry->value, copied, len - copied);
+ int copied;
+
+ copied = strscpy(entry->value, in, len);
+ if (copied > 0 && copied + 1 < len)
+ memset(entry->value + copied + 1, 0, len - copied - 1);
}
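/*
 * Editor's sketch, not part of the patch: effect of the fix above for a short
 * attribute value. strscpy() returns the number of characters copied (not
 * counting the NUL), so only the bytes after the terminator are cleared and
 * the attribute payload ends up NUL-padded, instead of the old code filling
 * the buffer with the numeric value of 'copied'.
 */
static void fdmi_attr_pad_example(void)
{
	char value[16];
	int copied = strscpy(value, "3.30", sizeof(value));	/* copied == 4 */

	if (copied > 0 && copied + 1 < sizeof(value))
		memset(value + copied + 1, 0, sizeof(value) - copied - 1);
	/* value now holds "3.30\0" followed by eleven more zero bytes */
}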
/**
@@ -190,10 +192,11 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
struct fc_fdmi_attr_entry *entry;
struct fs_fdmi_attrs *hba_attrs;
int numattrs = 0;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
switch (op) {
case FC_FDMI_RHBA:
- numattrs = 10;
+ numattrs = 11;
len = sizeof(struct fc_fdmi_rhba);
len -= sizeof(struct fc_fdmi_attr_entry);
len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
@@ -207,8 +210,21 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
- FC_FDMI_SUBTYPE);
+ FC_FDMI_SUBTYPE);
/* HBA Identifier */
put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
@@ -313,7 +329,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
&entry->type);
put_unaligned_be16(len, &entry->len);
fc_ct_ms_fill_attr(entry,
- fc_host_optionrom_version(lport->host),
+ "unknown",
FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
/* Firmware Version */
@@ -341,6 +357,100 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
"%s v%s",
init_utsname()->sysname,
init_utsname()->release);
+
+ /* Max CT payload */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_max_ct_payload(lport->host),
+ &entry->value);
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ /* Node symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NODESYMBLNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+
+ /* Vendor specific info */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(0,
+ &entry->value);
+
+ /* Number of ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NUMBEROFPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_ports(lport->host),
+ &entry->value);
+
+ /* Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* BIOS version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_bootbios_version(lport->host),
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+
+ /* BIOS state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_bootbios_state(lport->host),
+ &entry->value);
+
+ /* Vendor identifier */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORIDENTIFIER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_vendor_identifier(lport->host),
+ FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN);
+ }
+
break;
case FC_FDMI_RPA:
numattrs = 6;
@@ -353,6 +463,24 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+
+ }
+
ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
FC_FDMI_SUBTYPE);
@@ -441,6 +569,122 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
fc_ct_ms_fill_attr(entry,
init_utsname()->nodename,
FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+
+ /* Node name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_NODENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_node_name(lport->host),
+ &entry->value);
+
+ /* Port name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_NODENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(lport->wwpn,
+ &entry->value);
+
+ /* Port symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SYMBOLICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+
+ /* Port type */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTTYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_type(lport->host),
+ &entry->value);
+
+ /* Supported class of service */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTTYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_supported_classes(lport->host),
+ &entry->value);
+
+ /* Port Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* Port active FC-4 */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTFC4TYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ memcpy(&entry->value, fc_host_active_fc4s(lport->host),
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+
+ /* Port state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_state(lport->host),
+ &entry->value);
+
+ /* Discovered ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_DISCOVEREDPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_discovered_ports(lport->host),
+ &entry->value);
+
+ /* Port ID */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTID,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_id(lport->host),
+ &entry->value);
+ }
+
break;
case FC_FDMI_DPRT:
len = sizeof(struct fc_fdmi_dprt);
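/*
 * Editor's sketch, not part of the patch: the pattern repeated throughout the
 * FDMI-V2 blocks above, factored into a hypothetical helper. Each attribute
 * is a be16 type, a be16 length covering header plus value, then the value;
 * the entry pointer advances past the previous attribute's value each time.
 */
static struct fc_fdmi_attr_entry *
fdmi_next_attr(struct fc_fdmi_attr_entry *entry, size_t prev_value_len,
	       u16 type, size_t value_len)
{
	entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
					      prev_value_len);
	put_unaligned_be16(type, &entry->type);
	put_unaligned_be16(FC_FDMI_ATTR_ENTRY_HEADER_LEN + value_len,
			   &entry->len);
	return entry;
}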
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index cf36c8cb5493..19cd4a95d354 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -93,7 +93,10 @@
#define FC_LOCAL_PTP_FID_LO 0x010101
#define FC_LOCAL_PTP_FID_HI 0x010102
-#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+#define MAX_CT_PAYLOAD 2048
+#define DISCOVERED_PORTS 4
+#define NUMBER_OF_PORTS 1
static void fc_lport_error(struct fc_lport *, struct fc_frame *);
@@ -1185,7 +1188,7 @@ static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport = lp_arg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
-
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
@@ -1219,7 +1222,13 @@ static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
switch (lport->state) {
case LPORT_ST_RHBA:
- if (ntohs(ct->ct_cmd) == FC_FS_ACC)
+ if ((ntohs(ct->ct_cmd) == FC_FS_RJT) && fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "Error for FDMI-V2, fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+
+ } else if (ntohs(ct->ct_cmd) == FC_FS_ACC)
fc_lport_enter_ms(lport, LPORT_ST_RPA);
else /* Error Skip RPA */
fc_lport_enter_scr(lport);
@@ -1433,7 +1442,7 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
int size = sizeof(struct fc_ct_hdr);
size_t len;
int numattrs;
-
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
@@ -1446,10 +1455,10 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
case LPORT_ST_RHBA:
cmd = FC_FDMI_RHBA;
/* Number of HBA Attributes */
- numattrs = 10;
+ numattrs = 11;
len = sizeof(struct fc_fdmi_rhba);
len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+
len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
@@ -1460,6 +1469,21 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
size += len;
break;
@@ -1469,7 +1493,6 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
numattrs = 6;
len = sizeof(struct fc_fdmi_rpa);
len -= sizeof(struct fc_fdmi_attr_entry);
- len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
@@ -1477,6 +1500,22 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+
size += len;
break;
case LPORT_ST_DPRT:
@@ -1546,6 +1585,7 @@ static void fc_lport_timeout(struct work_struct *work)
struct fc_lport *lport =
container_of(work, struct fc_lport,
retry_work.work);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
mutex_lock(&lport->lp_mutex);
@@ -1573,6 +1613,13 @@ static void fc_lport_timeout(struct work_struct *work)
fc_lport_enter_fdmi(lport);
break;
case LPORT_ST_RHBA:
+ if (fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "timeout for FDMI-V2 RHBA,fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ break;
+ }
+ fallthrough;
case LPORT_ST_RPA:
case LPORT_ST_DHBA:
case LPORT_ST_DPRT:
@@ -1839,6 +1886,13 @@ EXPORT_SYMBOL(fc_lport_config);
*/
int fc_lport_init(struct fc_lport *lport)
{
+ struct fc_host_attrs *fc_host;
+
+ fc_host = shost_to_fc_host(lport->host);
+
+ /* Set FDMI version to FDMI-2 specification */
+ fc_host->fdmi_version = FDMI_V2;
+
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
@@ -1847,6 +1901,7 @@ int fc_lport_init(struct fc_lport *lport)
sizeof(fc_host_supported_fc4s(lport->host)));
fc_host_supported_fc4s(lport->host)[2] = 1;
fc_host_supported_fc4s(lport->host)[7] = 1;
+ fc_host_num_discovered_ports(lport->host) = 4;
/* This value is also unchanging */
memset(fc_host_active_fc4s(lport->host), 0,
@@ -1859,8 +1914,27 @@ int fc_lport_init(struct fc_lport *lport)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_40GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_40GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_25GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_25GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_50GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_50GBIT;
+
fc_fc4_add_lport(lport);
+ fc_host_num_discovered_ports(lport->host) = DISCOVERED_PORTS;
+ fc_host_port_state(lport->host) = FC_PORTSTATE_ONLINE;
+ fc_host_max_ct_payload(lport->host) = MAX_CT_PAYLOAD;
+ fc_host_num_ports(lport->host) = NUMBER_OF_PORTS;
+ fc_host_bootbios_state(lport->host) = 0x00000000;
+ snprintf(fc_host_bootbios_version(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "Unknown");
+
return 0;
}
EXPORT_SYMBOL(fc_lport_init);
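/*
 * Editor's summary sketch, not part of the patch: the FDMI version
 * negotiation the hunks above add. The lport now starts out advertising the
 * FDMI-2 attribute set; if the fabric rejects RHBA (FS_RJT) or the request
 * times out, fdmi_version drops back to FDMI_V1 and RHBA is retried with the
 * smaller attribute set before moving on to RPA.
 */
static void fc_lport_fdmi_fallback_sketch(struct fc_lport *lport)
{
	struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);

	if (fc_host->fdmi_version == FDMI_V2) {
		fc_host->fdmi_version = FDMI_V1;
		fc_lport_enter_ms(lport, LPORT_ST_RHBA);
	}
}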
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 4834219497ee..4683c183e9d4 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -230,11 +230,11 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
*/
static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
{
- struct iscsi_conn *conn = task->conn;
- struct iscsi_tm *tmf = &conn->tmhdr;
+ struct iscsi_session *session = task->conn->session;
+ struct iscsi_tm *tmf = &session->tmhdr;
u64 hdr_lun;
- if (conn->tmf_state == TMF_INITIAL)
+ if (session->tmf_state == TMF_INITIAL)
return 0;
if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC)
@@ -254,24 +254,19 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
* Fail all SCSI cmd PDUs
*/
if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
- iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x itt "
- "0x%x/0x%x] "
- "rejected.\n",
- opcode, task->itt,
- task->hdr_itt);
+ iscsi_session_printk(KERN_INFO, session,
+ "task [op %x itt 0x%x/0x%x] rejected.\n",
+ opcode, task->itt, task->hdr_itt);
return -EACCES;
}
/*
* And also all data-out PDUs in response to R2T
* if fast_abort is set.
*/
- if (conn->session->fast_abort) {
- iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x itt "
- "0x%x/0x%x] fast abort.\n",
- opcode, task->itt,
- task->hdr_itt);
+ if (session->fast_abort) {
+ iscsi_session_printk(KERN_INFO, session,
+ "task [op %x itt 0x%x/0x%x] fast abort.\n",
+ opcode, task->itt, task->hdr_itt);
return -EACCES;
}
break;
@@ -284,7 +279,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (opcode == ISCSI_OP_SCSI_DATA_OUT &&
task->hdr_itt == tmf->rtt) {
- ISCSI_DBG_SESSION(conn->session,
+ ISCSI_DBG_SESSION(session,
"Preventing task %x/%x from sending "
"data-out due to abort task in "
"progress\n", task->itt,
@@ -578,6 +573,11 @@ static bool cleanup_queued_task(struct iscsi_task *task)
__iscsi_put_task(task);
}
+ if (conn->session->running_aborted_task == task) {
+ conn->session->running_aborted_task = NULL;
+ __iscsi_put_task(task);
+ }
+
if (conn->task == task) {
conn->task = NULL;
__iscsi_put_task(task);
@@ -829,10 +829,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
ascq = session->tt->check_protection(task, &sector);
if (ascq) {
- sc->result = DRIVER_SENSE << 24 |
- SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(1, sc->sense_buffer,
- ILLEGAL_REQUEST, 0x10, ascq);
+ scsi_build_sense(sc, 1, ILLEGAL_REQUEST, 0x10, ascq);
scsi_set_sense_information(sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
sector);
@@ -936,20 +933,21 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
conn->tmfrsp_pdus_cnt++;
- if (conn->tmf_state != TMF_QUEUED)
+ if (session->tmf_state != TMF_QUEUED)
return;
if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
- conn->tmf_state = TMF_SUCCESS;
+ session->tmf_state = TMF_SUCCESS;
else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
- conn->tmf_state = TMF_NOT_FOUND;
+ session->tmf_state = TMF_NOT_FOUND;
else
- conn->tmf_state = TMF_FAILED;
- wake_up(&conn->ehwait);
+ session->tmf_state = TMF_FAILED;
+ wake_up(&session->ehwait);
}
static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
@@ -1361,7 +1359,6 @@ void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err)
{
struct iscsi_conn *conn;
- struct device *dev;
spin_lock_bh(&session->frwd_lock);
conn = session->leadconn;
@@ -1370,10 +1367,8 @@ void iscsi_session_failure(struct iscsi_session *session,
return;
}
- dev = get_device(&conn->cls_conn->dev);
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
- if (!dev)
- return;
/*
* if the host is being removed bypass the connection
* recovery initialization because we are going to kill
@@ -1383,27 +1378,36 @@ void iscsi_session_failure(struct iscsi_session *session,
iscsi_conn_error_event(conn->cls_conn, err);
else
iscsi_conn_failure(conn, err);
- put_device(dev);
+ iscsi_put_conn(conn->cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);
-void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
{
struct iscsi_session *session = conn->session;
- spin_lock_bh(&session->frwd_lock);
- if (session->state == ISCSI_STATE_FAILED) {
- spin_unlock_bh(&session->frwd_lock);
- return;
- }
+ if (session->state == ISCSI_STATE_FAILED)
+ return false;
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- spin_unlock_bh(&session->frwd_lock);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- iscsi_conn_error_event(conn->cls_conn, err);
+ return true;
+}
+
+void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+{
+ struct iscsi_session *session = conn->session;
+ bool needs_evt;
+
+ spin_lock_bh(&session->frwd_lock);
+ needs_evt = iscsi_set_conn_failed(conn);
+ spin_unlock_bh(&session->frwd_lock);
+
+ if (needs_evt)
+ iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);
@@ -1820,15 +1824,14 @@ EXPORT_SYMBOL_GPL(iscsi_target_alloc);
static void iscsi_tmf_timedout(struct timer_list *t)
{
- struct iscsi_conn *conn = from_timer(conn, t, tmf_timer);
- struct iscsi_session *session = conn->session;
+ struct iscsi_session *session = from_timer(session, t, tmf_timer);
spin_lock(&session->frwd_lock);
- if (conn->tmf_state == TMF_QUEUED) {
- conn->tmf_state = TMF_TIMEDOUT;
+ if (session->tmf_state == TMF_QUEUED) {
+ session->tmf_state = TMF_TIMEDOUT;
ISCSI_DBG_EH(session, "tmf timedout\n");
/* unblock eh_abort() */
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock(&session->frwd_lock);
}
@@ -1851,8 +1854,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
return -EPERM;
}
conn->tmfcmd_pdus_cnt++;
- conn->tmf_timer.expires = timeout * HZ + jiffies;
- add_timer(&conn->tmf_timer);
+ session->tmf_timer.expires = timeout * HZ + jiffies;
+ add_timer(&session->tmf_timer);
ISCSI_DBG_EH(session, "tmf set timeout\n");
spin_unlock_bh(&session->frwd_lock);
@@ -1866,12 +1869,12 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* 3) session is terminated or restarted or userspace has
* given up on recovery
*/
- wait_event_interruptible(conn->ehwait, age != session->age ||
+ wait_event_interruptible(session->ehwait, age != session->age ||
session->state != ISCSI_STATE_LOGGED_IN ||
- conn->tmf_state != TMF_QUEUED);
+ session->tmf_state != TMF_QUEUED);
if (signal_pending(current))
flush_signals(current);
- del_timer_sync(&conn->tmf_timer);
+ del_timer_sync(&session->tmf_timer);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -2180,6 +2183,51 @@ done:
spin_unlock(&session->frwd_lock);
}
+/**
+ * iscsi_conn_unbind - prevent queueing to conn.
+ * @cls_conn: iscsi conn the ep is bound to.
+ * @is_active: is the conn in use for boot or is this for EH/termination
+ *
+ * This must be called by drivers implementing the ep_disconnect callout.
+ * It disables queueing to the connection from libiscsi in preparation for
+ * an ep_disconnect call.
+ */
+void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
+{
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+
+ if (!cls_conn)
+ return;
+
+ conn = cls_conn->dd_data;
+ session = conn->session;
+ /*
+ * Wait for iscsi_eh calls to exit. We don't wait for the tmf to
+ * complete or time out. The caller just needs to know that nothing new
+ * will be queued and that whatever is still running is all that remains
+ * to be cleaned up.
+ */
+ mutex_lock(&session->eh_mutex);
+
+ iscsi_suspend_queue(conn);
+ iscsi_suspend_tx(conn);
+
+ spin_lock_bh(&session->frwd_lock);
+ if (!is_active) {
+ /*
+ * If the logout timed out before userspace could even send a PDU,
+ * the state might still be ISCSI_STATE_LOGGED_IN, allowing new
+ * cmds and TMFs.
+ */
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+ iscsi_set_conn_failed(conn);
+ }
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_unbind);
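/*
 * Editor's sketch, not part of the patch: how an offload driver's
 * ep_disconnect callout would be expected to use the new helper. The
 * example_* names are hypothetical.
 */
static void example_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct example_conn *econn = ep->dd_data;

	/* stop libiscsi queueing new cmds/TMFs before tearing down the ep */
	iscsi_conn_unbind(econn->cls_conn, false);

	example_hw_conn_teardown(econn);
}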
+
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
struct iscsi_tm *hdr)
{
@@ -2234,6 +2282,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
@@ -2244,9 +2293,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
spin_unlock(&session->back_lock);
- spin_unlock_bh(&session->frwd_lock);
- mutex_unlock(&session->eh_mutex);
- return SUCCESS;
+ goto success;
}
ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
__iscsi_get_task(task);
@@ -2258,17 +2305,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto failed;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_abort_task_pdu(task, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
goto failed;
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
spin_unlock_bh(&session->frwd_lock);
/*
@@ -2283,18 +2330,19 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
*/
spin_lock_bh(&session->frwd_lock);
fail_scsi_task(task, DID_ABORT);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
goto success_unlocked;
case TMF_TIMEDOUT:
+ session->running_aborted_task = task;
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto failed_unlocked;
case TMF_NOT_FOUND:
- if (!sc->SCp.ptr) {
- conn->tmf_state = TMF_INITIAL;
+ if (iscsi_task_is_completed(task)) {
+ session->tmf_state = TMF_INITIAL;
memset(hdr, 0, sizeof(*hdr));
/* task completed before tmf abort response */
ISCSI_DBG_EH(session, "sc completed while abort in "
@@ -2303,7 +2351,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
fallthrough;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto failed;
}
@@ -2313,6 +2361,7 @@ success_unlocked:
ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
sc, task->itt);
iscsi_put_task(task);
+ iscsi_put_conn(conn->cls_conn);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
@@ -2321,7 +2370,15 @@ failed:
failed_unlocked:
ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
task ? task->itt : 0);
- iscsi_put_task(task);
+ /*
+ * The driver might be accessing the task, so hold the ref. The conn
+ * stop cleanup will drop the ref after ep_disconnect, so we know the
+ * driver is no longer touching the task.
+ */
+ if (!session->running_aborted_task)
+ iscsi_put_task(task);
+
+ iscsi_put_conn(conn->cls_conn);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
@@ -2362,11 +2419,11 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto unlock;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_lun_reset_pdu(sc, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
@@ -2375,7 +2432,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
goto unlock;
}
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
@@ -2383,7 +2440,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto unlock;
}
@@ -2395,7 +2452,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
@@ -2418,8 +2475,7 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
spin_lock_bh(&session->frwd_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
session->state = ISCSI_STATE_RECOVERY_FAILED;
- if (session->leadconn)
- wake_up(&session->leadconn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock_bh(&session->frwd_lock);
}
@@ -2440,7 +2496,6 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- conn = session->leadconn;
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -2455,16 +2510,17 @@ failed:
return FAILED;
}
+ conn = session->leadconn;
+ iscsi_get_conn(conn->cls_conn);
+
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
- /*
- * we drop the lock here but the leadconn cannot be destoyed while
- * we are in the scsi eh
- */
+
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
+ iscsi_put_conn(conn->cls_conn);
ISCSI_DBG_EH(session, "wait for relogin\n");
- wait_event_interruptible(conn->ehwait,
+ wait_event_interruptible(session->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
session->state == ISCSI_STATE_RECOVERY_FAILED);
@@ -2525,11 +2581,11 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
conn = session->leadconn;
/* only have one tmf outstanding at a time */
- if (conn->tmf_state != TMF_INITIAL)
+ if (session->tmf_state != TMF_INITIAL)
goto unlock;
- conn->tmf_state = TMF_QUEUED;
+ session->tmf_state = TMF_QUEUED;
- hdr = &conn->tmhdr;
+ hdr = &session->tmhdr;
iscsi_prep_tgt_reset_pdu(sc, hdr);
if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
@@ -2538,7 +2594,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
goto unlock;
}
- switch (conn->tmf_state) {
+ switch (session->tmf_state) {
case TMF_SUCCESS:
break;
case TMF_TIMEDOUT:
@@ -2546,7 +2602,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);
goto done;
default:
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
goto unlock;
}
@@ -2558,7 +2614,7 @@ static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
spin_lock_bh(&session->frwd_lock);
memset(hdr, 0, sizeof(*hdr));
fail_scsi_tasks(conn, -1, DID_ERROR);
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
spin_unlock_bh(&session->frwd_lock);
iscsi_start_tx(conn);
@@ -2888,7 +2944,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
session->tt = iscsit;
session->dd_data = cls_session->dd_data + sizeof(*session);
+ session->tmf_state = TMF_INITIAL;
+ timer_setup(&session->tmf_timer, iscsi_tmf_timedout, 0);
mutex_init(&session->eh_mutex);
+
spin_lock_init(&session->frwd_lock);
spin_lock_init(&session->back_lock);
@@ -2939,10 +2998,9 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
struct module *owner = cls_session->transport->owner;
struct Scsi_Host *shost = session->host;
- iscsi_pool_free(&session->cmdpool);
-
iscsi_remove_session(cls_session);
+ iscsi_pool_free(&session->cmdpool);
kfree(session->password);
kfree(session->password_in);
kfree(session->username);
@@ -2992,7 +3050,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
conn->id = conn_idx;
conn->exp_statsn = 0;
- conn->tmf_state = TMF_INITIAL;
timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
@@ -3017,8 +3074,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
- timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0);
- init_waitqueue_head(&conn->ehwait);
+ init_waitqueue_head(&session->ehwait);
return cls_conn;
@@ -3053,7 +3109,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
* leading connection? then give up on recovery.
*/
session->state = ISCSI_STATE_TERMINATE;
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
}
spin_unlock_bh(&session->frwd_lock);
@@ -3128,7 +3184,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
* commands after successful recovery
*/
conn->stop_stage = 0;
- conn->tmf_state = TMF_INITIAL;
+ session->tmf_state = TMF_INITIAL;
session->age++;
if (session->age == 16)
session->age = 0;
@@ -3142,7 +3198,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
spin_unlock_bh(&session->frwd_lock);
iscsi_unblock_session(session->cls_session);
- wake_up(&conn->ehwait);
+ wake_up(&session->ehwait);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_start);
@@ -3236,7 +3292,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
spin_lock_bh(&session->frwd_lock);
fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
fail_mgmt_tasks(session, conn);
- memset(&conn->tmhdr, 0, sizeof(conn->tmhdr));
+ memset(&session->tmhdr, 0, sizeof(session->tmhdr));
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e9a86128f1f1..4aa1fda95f35 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -116,9 +116,10 @@ static void sas_ata_task_done(struct sas_task *task)
}
}
- if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
- ((stat->stat == SAM_STAT_CHECK_CONDITION &&
- dev->sata_dev.class == ATA_DEV_ATAPI))) {
+ if (stat->stat == SAS_PROTO_RESPONSE ||
+ stat->stat == SAS_SAM_STAT_GOOD ||
+ (stat->stat == SAS_SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.class == ATA_DEV_ATAPI)) {
memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 6d583e8c403a..e00688540219 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -101,7 +101,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = 0;
break;
}
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index e2d42593ce52..2966ead1d421 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -20,7 +20,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
else if (iu->datapres == 1)
tstat->stat = iu->resp_data[3];
else if (iu->datapres == 2) {
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
tstat->buf_valid_size =
min_t(int, SAS_STATUS_BUF_SIZE,
be32_to_cpu(iu->sense_data_len));
@@ -32,7 +32,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
}
else
/* when datapres contains corrupt/unknown value... */
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
}
EXPORT_SYMBOL_GPL(sas_ssp_task_response);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f8de0d10620b..17028861234b 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -266,6 +266,7 @@ struct lpfc_stats {
uint32_t elsRcvECHO;
uint32_t elsRcvLCB;
uint32_t elsRcvRDP;
+ uint32_t elsRcvRDF;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
@@ -303,6 +304,64 @@ struct lpfc_stats {
struct lpfc_hba;
+#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
+
+#define LPFC_MAX_VMID_SIZE 256
+#define LPFC_COMPRESS_VMID_SIZE 16
+
+union lpfc_vmid_io_tag {
+ u32 app_id; /* App Id vmid */
+ u8 cs_ctl_vmid; /* Priority tag vmid */
+};
+
+#define JIFFIES_PER_HR (HZ * 60 * 60)
+
+struct lpfc_vmid {
+ u8 flag;
+#define LPFC_VMID_SLOT_FREE 0x0
+#define LPFC_VMID_SLOT_USED 0x1
+#define LPFC_VMID_REQ_REGISTER 0x2
+#define LPFC_VMID_REGISTERED 0x4
+#define LPFC_VMID_DE_REGISTER 0x8
+ char host_vmid[LPFC_MAX_VMID_SIZE];
+ union lpfc_vmid_io_tag un;
+ struct hlist_node hnode;
+ u64 io_rd_cnt;
+ u64 io_wr_cnt;
+ u8 vmid_len;
+ u8 delete_inactive; /* Delete if inactive flag 0 = no, 1 = yes */
+ u32 hash_index;
+ u64 __percpu *last_io_time;
+};
+
+#define lpfc_vmid_is_type_priority_tag(vport)\
+ (vport->vmid_priority_tagging ? 1 : 0)
+
+#define LPFC_VMID_HASH_SIZE 256
+#define LPFC_VMID_HASH_MASK 255
+#define LPFC_VMID_HASH_SHIFT 6
+
+struct lpfc_vmid_context {
+ struct lpfc_vmid *vmp;
+ struct lpfc_nodelist *nlp;
+ bool instantiated;
+};
+
+struct lpfc_vmid_priority_range {
+ u8 low;
+ u8 high;
+ u8 qos;
+};
+
+struct lpfc_vmid_priority_info {
+ u32 num_descriptors;
+ struct lpfc_vmid_priority_range *vmid_range;
+};
+
+#define QFPA_EVEN_ONLY 0x01
+#define QFPA_ODD_ONLY 0x02
+#define QFPA_EVEN_ODD 0x03
+
enum discovery_state {
LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
LPFC_VPORT_FAILED = 1, /* vport has failed */
@@ -442,6 +501,9 @@ struct lpfc_vport {
#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
#define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */
+#define WORKER_CHECK_INACTIVE_VMID 0x4000 /* hba: check inactive vmids */
+#define WORKER_CHECK_VMID_ISSUE_QFPA 0x8000 /* vport: Check if qfpa needs
+ * to be issued */
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
@@ -452,6 +514,8 @@ struct lpfc_vport {
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
#define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */
+#define FC_ALLOW_VMID 0x8 /* Allow VMID I/Os */
+#define FC_DEREGISTER_ALL_APP_ID 0x10 /* Deregister all VMIDs */
/* Vport Config Parameters */
uint32_t cfg_scan_down;
uint32_t cfg_lun_queue_depth;
@@ -470,9 +534,36 @@ struct lpfc_vport {
uint32_t cfg_tgt_queue_depth;
uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
+ /* VMID parameters */
+ u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE];
+ u32 max_vmid; /* maximum VMIDs allowed per port */
+ u32 cur_vmid_cnt; /* Current VMID count */
+#define LPFC_MIN_VMID 4
+#define LPFC_MAX_VMID 255
+ u32 vmid_inactivity_timeout; /* Time after which the VMID */
+ /* deregisters from switch */
+ u32 vmid_priority_tagging;
+#define LPFC_VMID_PRIO_TAG_DISABLE 0 /* Disable */
+#define LPFC_VMID_PRIO_TAG_SUP_TARGETS 1 /* Allow supported targets only */
+#define LPFC_VMID_PRIO_TAG_ALL_TARGETS 2 /* Allow all targets */
+ unsigned long *vmid_priority_range;
+#define LPFC_VMID_MAX_PRIORITY_RANGE 256
+#define LPFC_VMID_PRIORITY_BITMAP_SIZE 32
+ u8 vmid_flag;
+#define LPFC_VMID_IN_USE 0x1
+#define LPFC_VMID_ISSUE_QFPA 0x2
+#define LPFC_VMID_QFPA_CMPL 0x4
+#define LPFC_VMID_QOS_ENABLED 0x8
+#define LPFC_VMID_TIMER_ENBLD 0x10
+ struct fc_qfpa_res *qfpa_res;
struct fc_vport *fc_vport;
+ struct lpfc_vmid *vmid;
+ DECLARE_HASHTABLE(hash_table, 8);
+ rwlock_t vmid_lock;
+ struct lpfc_vmid_priority_info vmid_priority;
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
@@ -915,6 +1006,7 @@ struct lpfc_hba {
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
+ u32 cfg_fcp_wait_abts_rsp;
uint32_t cfg_delay_discovery;
uint32_t cfg_sli_mode;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -938,6 +1030,13 @@ struct lpfc_hba {
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
+ u32 cfg_max_vmid; /* maximum VMIDs allowed per port */
+ u32 cfg_vmid_app_header;
+#define LPFC_VMID_APP_HEADER_DISABLE 0
+#define LPFC_VMID_APP_HEADER_ENABLE 1
+ u32 cfg_vmid_priority_tagging;
+ u32 cfg_vmid_inactivity_timeout; /* Time after which the VMID */
+ /* deregisters from switch */
struct pci_dev *pcidev;
struct list_head work_list;
uint32_t work_ha; /* Host Attention Bits for WT */
@@ -1178,6 +1277,7 @@ struct lpfc_hba {
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
+ struct timer_list inactive_vmid_poll;
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
@@ -1419,3 +1519,27 @@ static const char *routine(enum enum_name table_key) \
} \
return name; \
}
+
+/**
+ * lpfc_is_vmid_enabled - returns whether VMID is enabled for either switch type
+ * @phba: Pointer to HBA context object.
+ *
+ * Relationship between the enable flag, target support and whether a VMID tag
+ * is required for the particular combination:
+ * ---------------------------------------------------
+ * Switch    Enable Flag    Target Support    VMID Needed
+ * ---------------------------------------------------
+ * App Id    0              NA                N
+ * App Id    1              0                 N
+ * App Id    1              1                 Y
+ * Pr Tag    0              NA                N
+ * Pr Tag    1              0                 N
+ * Pr Tag    1              1                 Y
+ * Pr Tag    2              *                 Y
+ * ---------------------------------------------------
+ *
+ **/
+static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
+{
+ return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
+}
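/*
 * Editor's sketch, not part of the patch: the "VMID Needed" column of the
 * table above expressed as code. 'target_support' and the helper name are
 * hypothetical; priority tagging mode 2 (LPFC_VMID_PRIO_TAG_ALL_TARGETS)
 * needs a tag regardless of what the target advertises.
 */
static inline bool lpfc_vmid_tag_needed_sketch(struct lpfc_hba *phba,
					       bool target_support)
{
	if (phba->cfg_vmid_priority_tagging == LPFC_VMID_PRIO_TAG_ALL_TARGETS)
		return true;

	return lpfc_is_vmid_enabled(phba) && target_support;
}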
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 0975a8b252a0..eb88aaaf36eb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3449,6 +3449,15 @@ LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
/*
+ * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of
+ * aborted IO.
+ * The range is [0,1]. Default value is 0
+ * 0, IO completes after ABTS issued (default).
+ * 1, IO completes after receipt of ABTS response or timeout.
+ */
+LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion");
+
+/*
# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
# 0x0 = disabled, XRI/OXID use not tracked.
# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
@@ -6153,6 +6162,45 @@ LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
*/
LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
+/*
+ * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if
+ * either vmid_app_header or vmid_priority_tagging is enabled.
+ * 4 - 255 = vmid support enabled for 4-255 VMs
+ * Value range is [4,255].
+ */
+LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
+ "Maximum number of VMs supported");
+
+/*
+ * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours
+ * 0 = Timeout is disabled
+ * Value range is [0,24].
+ */
+LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
+ "Inactivity timeout in hours");
+
+/*
+ * lpfc_vmid_app_header: Enable App Header VMID support
+ * 0 = Support is disabled (default)
+ * 1 = Support is enabled
+ * Value range is [0,1].
+ */
+LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
+ LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
+ "Enable App Header VMID support");
+
+/*
+ * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support
+ * 0 = Support is disabled (default)
+ * 1 = Allow supported targets only
+ * 2 = Allow all targets
+ * Value range is [0,2].
+ */
+LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
+ LPFC_VMID_PRIO_TAG_DISABLE,
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS,
+ "Enable Priority Tagging VMID support");
+
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_nvme_info,
&dev_attr_scsi_stat,
@@ -6205,6 +6253,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_enable_npiv,
&dev_attr_lpfc_fcf_failover_policy,
&dev_attr_lpfc_enable_rrq,
+ &dev_attr_lpfc_fcp_wait_abts_rsp,
&dev_attr_nport_evt_cnt,
&dev_attr_board_mode,
&dev_attr_max_vpi,
@@ -6271,6 +6320,10 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
&dev_attr_lpfc_enable_mi,
+ &dev_attr_lpfc_max_vmid,
+ &dev_attr_lpfc_vmid_inactivity_timeout,
+ &dev_attr_lpfc_vmid_app_header,
+ &dev_attr_lpfc_vmid_priority_tagging,
NULL,
};
@@ -7332,6 +7385,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
+ lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
@@ -7346,6 +7400,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
+ /* VMID Inits */
+ lpfc_max_vmid_init(phba, lpfc_max_vmid);
+ lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
+ lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
+ lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
if (phba->sli_rev != LPFC_SLI_REV4)
phba->cfg_EnableXLane = 0;
lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 383abf46fd29..737483c3f01d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -80,6 +80,7 @@ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -607,3 +608,14 @@ extern unsigned long lpfc_no_hba_reset[];
extern union lpfc_wqe128 lpfc_iread_cmd_template;
extern union lpfc_wqe128 lpfc_iwrite_cmd_template;
extern union lpfc_wqe128 lpfc_icmnd_cmd_template;
+
+/* vmid interface */
+int lpfc_vmid_uvem(struct lpfc_vport *vport, struct lpfc_vmid *vmid, bool ins);
+uint32_t lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport);
+int lpfc_vmid_cmd(struct lpfc_vport *vport,
+ int cmdcode, struct lpfc_vmid *vmid);
+int lpfc_vmid_hash_fn(const char *vmid, int len);
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ uint32_t hash, uint8_t *buf);
+void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
+int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 3bbefa225484..610b6dabb3b5 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -75,6 +75,9 @@
static char *lpfc_release_version = LPFC_DRIVER_VERSION;
+static void
+lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
static void
lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
@@ -587,7 +590,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *),
- struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
+ struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
@@ -608,15 +611,14 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
- if (usr_flg)
- geniocb->context3 = NULL;
- else
- geniocb->context3 = (uint8_t *) bmp;
+ geniocb->context3 = (uint8_t *) bmp;
/* Save for completion so we can release these resources */
geniocb->context1 = (uint8_t *) inp;
geniocb->context2 = (uint8_t *) outp;
+ geniocb->event_tag = event_tag;
+
/* Fill in payload, bp points to frame payload */
icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
@@ -707,8 +709,8 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
* lpfc_alloc_ct_rsp.
*/
cnt += 1;
- status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
- cnt, 0, retry);
+ status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp,
+ phba->fc_eventTag, cnt, 0, retry);
if (status) {
lpfc_free_ct_rsp(phba, outmp);
return -ENOMEM;
@@ -957,6 +959,13 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GID_FT cmpl: status:x%x/x%x rtry:%d",
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9043 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
if (vport->fc_flag & FC_RSCN_MODE)
@@ -1167,6 +1176,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
vport->fc_ns_retry);
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9044 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
/* Don't bother processing response if vport is being torn down. */
if (vport->load_flag & FC_UNLOADING) {
if (vport->fc_flag & FC_RSCN_MODE)
@@ -1366,6 +1382,13 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GFF_ID cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ /* Ignore response if link flipped after this request was made */
+ if (cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9045 Event tag mismatch. Ignoring NS rsp\n");
+ goto iocb_free;
+ }
+
if (irsp->ulpStatus == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
@@ -1479,6 +1502,7 @@ out:
lpfc_disc_start(vport);
}
+iocb_free:
free_ndlp = cmdiocb->context_un.ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
@@ -1506,6 +1530,13 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ /* Ignore response if link flipped after this request was made */
+ if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "9046 Event tag mismatch. Ignoring NS rsp\n");
+ goto out;
+ }
+
/* Preserve the nameserver node to release the reference. */
ns_ndlp = cmdiocb->context_un.ndlp;
@@ -1572,6 +1603,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+out:
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ns_ndlp);
}
@@ -3748,3 +3780,255 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
}
return;
}
+
+static void
+lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_dmabuf *inp = cmdiocb->context1;
+ struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_sli_ct_request *ctcmd = inp->virt;
+ struct lpfc_sli_ct_request *ctrsp = outp->virt;
+ u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
+ struct app_id_object *app;
+ u32 cmd, hash, bucket;
+ struct lpfc_vmid *vmp, *cur;
+ u8 *data = outp->virt;
+ int i;
+
+ cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp);
+ if (cmd == SLI_CTAS_DALLAPP_ID)
+ lpfc_ct_free_iocb(phba, cmdiocb);
+
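+ /* On a link attention or IOCB error only DALLAPP_ID continues, so the
+ * local VMID state can still be cleaned up below; all other commands
+ * simply return.
+ */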
+ if (lpfc_els_chk_latt(vport) || rspiocb->iocb.ulpStatus) {
+ if (cmd != SLI_CTAS_DALLAPP_ID)
+ return;
+ }
+ /* Check for a CT LS_RJT response */
+ if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
+ if (cmd != SLI_CTAS_DALLAPP_ID)
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "3306 VMID FS_RJT Data: x%x x%x x%x\n",
+ cmd, ctrsp->ReasonCode,
+ ctrsp->Explanation);
+ if ((cmd != SLI_CTAS_DALLAPP_ID) ||
+ (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) ||
+ (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
+ /* If DALLAPP_ID failed retry later */
+ if (cmd == SLI_CTAS_DALLAPP_ID)
+ vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
+ return;
+ }
+ }
+
+ switch (cmd) {
+ case SLI_CTAS_RAPP_IDENT:
+ app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data);
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6712 RAPP_IDENT app id %d port id x%x id "
+ "len %d\n", be32_to_cpu(app->app_id),
+ be32_to_cpu(app->port_id),
+ app->obj.entity_id_len);
+
+ if (app->obj.entity_id_len == 0 || app->port_id == 0)
+ return;
+
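+ /* Locate the local VMID entry by its entity id hash and record the
+ * fabric-assigned application id on a match.
+ */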
+ hash = lpfc_vmid_hash_fn(app->obj.entity_id,
+ app->obj.entity_id_len);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash,
+ app->obj.entity_id);
+ if (vmp) {
+ write_lock(&vport->vmid_lock);
+ vmp->un.app_id = be32_to_cpu(app->app_id);
+ vmp->flag |= LPFC_VMID_REGISTERED;
+ vmp->flag &= ~LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ /* Set IN USE flag */
+ vport->vmid_flag |= LPFC_VMID_IN_USE;
+ } else {
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6901 No entry found %s hash %d\n",
+ app->obj.entity_id, hash);
+ }
+ break;
+ case SLI_CTAS_DAPP_IDENT:
+ app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data);
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "6713 DAPP_IDENT app id %d port id x%x\n",
+ be32_to_cpu(app->app_id),
+ be32_to_cpu(app->port_id));
+ break;
+ case SLI_CTAS_DALLAPP_ID:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "8856 Deregistered all app ids\n");
+ read_lock(&vport->vmid_lock);
+ for (i = 0; i < phba->cfg_max_vmid; i++) {
+ vmp = &vport->vmid[i];
+ if (vmp->flag != LPFC_VMID_SLOT_FREE)
+ memset(vmp, 0, sizeof(struct lpfc_vmid));
+ }
+ read_unlock(&vport->vmid_lock);
+ /* for all elements in the hash table */
+ if (!hash_empty(vport->hash_table))
+ hash_for_each(vport->hash_table, bucket, cur, hnode)
+ hash_del(&cur->hnode);
+ vport->load_flag |= FC_ALLOW_VMID;
+ break;
+ default:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "8857 Invalid command code\n");
+ }
+}
+
+/**
+ * lpfc_vmid_cmd - Build and send a VMID CT command to the management server
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdcode: application server CT command code to send.
+ * @vmid: pointer to the VMID entry the command operates on.
+ *
+ * Builds and sends an application services (VMID) CT command using the CT
+ * subsystem.
+ */
+int
+lpfc_vmid_cmd(struct lpfc_vport *vport,
+ int cmdcode, struct lpfc_vmid *vmid)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_sli_ct_request *ctreq;
+ struct ulp_bde64 *bpl;
+ u32 size;
+ u32 rsp_size;
+ u8 *data;
+ struct lpfc_vmid_rapp_ident_list *rap;
+ struct lpfc_vmid_dapp_ident_list *dap;
+ u8 retry = 0;
+ struct lpfc_nodelist *ndlp;
+
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
+
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return 0;
+
+ cmpl = lpfc_cmpl_ct_cmd_vmid;
+
+ /* fill in BDEs for command */
+ /* Allocate buffer for command payload */
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ goto vmid_free_mp_exit;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt)
+ goto vmid_free_mp_virt_exit;
+
+ /* Allocate buffer for Buffer ptr list */
+ bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
+ if (!bmp)
+ goto vmid_free_bmp_exit;
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt)
+ goto vmid_free_bmp_virt_exit;
+
+ INIT_LIST_HEAD(&mp->list);
+ INIT_LIST_HEAD(&bmp->list);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "3275 VMID Request Data: x%x x%x x%x\n",
+ vport->fc_flag, vport->port_state, cmdcode);
+ ctreq = (struct lpfc_sli_ct_request *)mp->virt;
+ data = mp->virt;
+ /* First populate the CT_IU preamble */
+ memset(data, 0, LPFC_BPL_SIZE);
+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ctreq->RevisionId.bits.InId = 0;
+
+ ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE;
+ ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes;
+
+ ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
+ rsp_size = LPFC_BPL_SIZE;
+ size = 0;
+
+ switch (cmdcode) {
+ case SLI_CTAS_RAPP_IDENT:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "1329 RAPP_IDENT for %s\n", vmid->host_vmid);
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ rap = (struct lpfc_vmid_rapp_ident_list *)
+ (DAPP_IDENT_OFFSET + data);
+ rap->no_of_objects = cpu_to_be32(1);
+ rap->obj[0].entity_id_len = vmid->vmid_len;
+ memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
+ size = RAPP_IDENT_OFFSET +
+ sizeof(struct lpfc_vmid_rapp_ident_list);
+ retry = 1;
+ break;
+
+ case SLI_CTAS_GALLAPPIA_ID:
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ size = GALLAPPIA_ID_SIZE;
+ break;
+
+ case SLI_CTAS_DAPP_IDENT:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "1469 DAPP_IDENT for %s\n", vmid->host_vmid);
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ dap = (struct lpfc_vmid_dapp_ident_list *)
+ (DAPP_IDENT_OFFSET + data);
+ dap->no_of_objects = cpu_to_be32(1);
+ dap->obj[0].entity_id_len = vmid->vmid_len;
+ memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
+ size = DAPP_IDENT_OFFSET +
+ sizeof(struct lpfc_vmid_dapp_ident_list);
+ write_lock(&vport->vmid_lock);
+ vmid->flag &= ~LPFC_VMID_REGISTERED;
+ write_unlock(&vport->vmid_lock);
+ retry = 1;
+ break;
+
+ case SLI_CTAS_DALLAPP_ID:
+ ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
+ size = DALLAPP_ID_SIZE;
+ break;
+
+ default:
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "7062 VMID cmdcode x%x not supported\n",
+ cmdcode);
+ goto vmid_free_all_mem;
+ }
+
+ ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
+
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ bpl->addrHigh = putPaddrHigh(mp->phys);
+ bpl->addrLow = putPaddrLow(mp->phys);
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = size;
+
+ /* lpfc_ct_cmd/lpfc_gen_req will increment the ndlp reference count
+ * to hold the ndlp reference for the corresponding callback function.
+ */
+ if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
+ return 0;
+
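+ /* Error unwind: free the DMA buffers and descriptors in the reverse
+ * order of their allocation.
+ */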
+ vmid_free_all_mem:
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ vmid_free_bmp_virt_exit:
+ kfree(bmp);
+ vmid_free_bmp_exit:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ vmid_free_mp_virt_exit:
+ kfree(mp);
+ vmid_free_mp_exit:
+
+ /* Issue CT request failed */
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
+ "3276 VMID CT request failed Data: x%x\n", cmdcode);
+ return -EIO;
+}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 658a962832b3..6ff85ae57e79 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -863,16 +863,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
statep, ndlp->nlp_DID);
len += scnprintf(buf+len, size-len,
- "WWPN x%llx ",
+ "WWPN x%016llx ",
wwn_to_u64(ndlp->nlp_portname.u.wwn));
len += scnprintf(buf+len, size-len,
- "WWNN x%llx ",
+ "WWNN x%016llx ",
wwn_to_u64(ndlp->nlp_nodename.u.wwn));
- if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- len += scnprintf(buf+len, size-len, "RPI:%04d ",
- ndlp->nlp_rpi);
- else
- len += scnprintf(buf+len, size-len, "RPI:none ");
+ len += scnprintf(buf+len, size-len, "RPI:x%04x ",
+ ndlp->nlp_rpi);
len += scnprintf(buf+len, size-len, "flag:x%08x ",
ndlp->nlp_flag);
if (!ndlp->nlp_type)
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 08999aad6a10..131374a61d7e 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -86,6 +86,7 @@ enum lpfc_fc4_xpt_flags {
struct lpfc_nodelist {
struct list_head nlp_listp;
+ struct serv_parm fc_sparam; /* buffer for service params */
struct lpfc_name nlp_portname;
struct lpfc_name nlp_nodename;
@@ -124,6 +125,7 @@ struct lpfc_nodelist {
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
u8 nlp_nvme_info; /* NVME NSLER Support */
+ uint8_t vmid_support; /* destination VMID support */
#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 21108f322c99..e481f5fe29d7 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -55,9 +56,15 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb);
+static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
static int lpfc_max_els_tries = 3;
+static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
+static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
+static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
+
/**
* lpfc_els_chk_latt - Check host link attention event for a vport
* @vport: pointer to a host virtual N_Port data structure.
@@ -314,10 +321,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x\n",
+ "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n",
elscmd, did, elsiocb->iotag,
vport->port_state, ndlp->nlp_rpi,
- vport->fc_flag);
+ vport->fc_flag, ndlp->nlp_flag, vport);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -1112,11 +1119,15 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0101 FLOGI completes successfully, I/O tag:x%x, "
- "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
+ "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
irsp->un.ulpWord[4], sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
- vport->port_state, vport->fc_flag);
+ vport->port_state, vport->fc_flag,
+ sp->cmn.priority_tagging);
+
+ if (sp->cmn.priority_tagging)
+ vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
if (vport->port_state == LPFC_FLOGI) {
/*
@@ -1175,6 +1186,15 @@ stop_rr_fcf_flogi:
phba->fcf.fcf_redisc_attempted = 0; /* reset */
goto out;
}
+ } else if (vport->port_state > LPFC_FLOGI &&
+ vport->fc_flag & FC_PT2PT) {
+ /*
+ * In a p2p topology, it is possible that discovery has
+ * already progressed, and this completion can be ignored.
+ * Recheck the indicated topology.
+ */
+ if (!sp->cmn.fPort)
+ goto out;
}
flogifail:
@@ -1299,6 +1319,18 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.fcphHigh < FC_PH3)
sp->cmn.fcphHigh = FC_PH3;
+ /* Determine if switch supports priority tagging */
+ if (phba->cfg_vmid_priority_tagging) {
+ sp->cmn.priority_tagging = 1;
+ /* lpfc_vmid_host_uuid is a combination of wwpn and wwnn */
+ if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
+ memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
+ sizeof(phba->wwpn));
+ memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
+ sizeof(phba->wwnn));
+ }
+ }
+
if (phba->sli_rev == LPFC_SLI_REV4) {
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
@@ -1925,6 +1957,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp, *free_ndlp;
struct lpfc_dmabuf *prsp;
int disc;
+ struct serv_parm *sp = NULL;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1998,9 +2031,20 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
- /* As long as this node is not registered with the scsi or nvme
- * transport, it is no longer an active node. Otherwise
- * devloss handles the final cleanup.
+ /* If a PLOGI collision occurred, the node needs to continue
+ * with the reglogin process.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
+ ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ spin_unlock_irq(&ndlp->lock);
+ goto out;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ /* No PLOGI collision and the node is not registered with the
+ * scsi or nvme transport. It is no longer an active node. Just
+ * start the device remove process.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
spin_lock_irq(&ndlp->lock);
@@ -2015,6 +2059,23 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context2)->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+
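+ /* The accepted service parameters start right after the 4-byte ELS
+ * command word in the PLOGI response payload.
+ */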
+ sp = (struct serv_parm *)((u8 *)prsp->virt +
+ sizeof(u32));
+
+ ndlp->vmid_support = 0;
+ if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
+ (phba->cfg_vmid_priority_tagging &&
+ sp->cmn.priority_tagging)) {
+ lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
+ "4018 app_hdr_support %d tagging %d DID x%x\n",
+ sp->cmn.app_hdr_support,
+ sp->cmn.priority_tagging,
+ ndlp->nlp_DID);
+ /* if the dest port supports VMID, mark it in ndlp */
+ ndlp->vmid_support = 1;
+ }
+
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
}
@@ -2137,6 +2198,14 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
sp->cmn.bbRcvSizeMsb &= 0xF;
+ /* Check if the destination port supports VMID */
+ ndlp->vmid_support = 0;
+ if (vport->vmid_priority_tagging)
+ sp->cmn.priority_tagging = 1;
+ else if (phba->cfg_vmid_app_header &&
+ bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
+ sp->cmn.app_hdr_support = 1;
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
did, 0, 0);
@@ -2869,6 +2938,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* log into the remote port.
*/
if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+ spin_lock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
lpfc_els_free_iocb(phba, cmdiocb);
@@ -3061,6 +3135,95 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
+ * @vport: pointer to lpfc_vport data structure.
+ * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
+ *
+ * This routine registers the rpi assigned to the fabric controller
+ * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
+ * state triggering a registration with the SCSI transport.
+ *
+ * This routine is singled out because the fabric controller node
+ * does not receive a PLOGI. This routine is consumed by the
+ * SCR and RDF ELS commands. Callers are expected to qualify
+ * with SLI4 first.
+ **/
+static int
+lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
+{
+ int rc = 0;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ns_ndlp;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *mp;
+
+ if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
+ return rc;
+
+ ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (!ns_ndlp)
+ return -ENODEV;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
+ __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
+ ns_ndlp->nlp_state);
+ if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENODEV;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0936 %s: no memory for reg_login "
+ "Data: x%x x%x x%x x%x\n", __func__,
+ fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ return -ENOMEM;
+ }
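+ /* No PLOGI is exchanged with the fabric controller, so the REG_LOGIN
+ * is formatted with the vport's own service parameters.
+ */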
+ rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
+ (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
+ if (rc) {
+ rc = -EACCES;
+ goto out;
+ }
+
+ fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
+ mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
+ if (!mbox->ctx_ndlp) {
+ rc = -ENOMEM;
+ goto out_mem;
+ }
+
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ rc = -ENODEV;
+ lpfc_nlp_put(fc_ndlp);
+ goto out_mem;
+ }
+ /* Success path. Exit. */
+ lpfc_nlp_set_state(vport, fc_ndlp,
+ NLP_STE_REG_LOGIN_ISSUE);
+ return 0;
+
+ out_mem:
+ fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+
+ out:
+ mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0938 %s: failed to format reg_login "
+ "Data: x%x x%x x%x x%x\n", __func__,
+ fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
+ fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
+ return rc;
+}
+
+/**
* lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
* @phba: pointer to lpfc hba data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
@@ -3206,10 +3369,18 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_SCR);
-
if (!elsiocb)
return 1;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
+ if (rc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0937 %s: Failed to reg fc node, rc %d\n",
+ __func__, rc);
+ return 1;
+ }
+ }
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
@@ -3497,6 +3668,17 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
if (!elsiocb)
return -ENOMEM;
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+ "0939 %s: FC_NODE x%x RPI x%x flag x%x "
+ "ste x%x type x%x Not registered\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_type);
+ return -ENODEV;
+ }
+
/* Configure the payload for the supported FPIN events. */
prdf = (struct lpfc_els_rdf_req *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
@@ -3537,6 +3719,43 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
return 0;
}
+/**
+ * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * A received RDF implies a possible change to fabric supported diagnostic
+ * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
+ * RDF request to reregister for supported diagnostic functions.
+ *
+ * Return code
+ * 0 - Success
+ * -EIO - Failed to process received RDF
+ **/
+static int
+lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ /* Send LS_ACC */
+ if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "1623 Failed to RDF_ACC from x%x for x%x\n",
+ ndlp->nlp_DID, vport->fc_myDID);
+ return -EIO;
+ }
+
+ /* Issue new RDF for reregistering */
+ if (lpfc_issue_els_rdf(vport, 0)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2623 Failed to re register RDF for x%x\n",
+ vport->fc_myDID);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
* @vport: pointer to a host virtual N_Port data structure.
@@ -4383,12 +4602,27 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
+ /* This clause allows the LOGO ACC to complete and free resources
+ * for the Fabric Domain Controller. It deliberately skips the
+ * unreg_rpi and RPI release because some fabrics send RDP
+ * requests after logging out from the initiator.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
+ goto out;
+
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
- /* If the ndlp is being used by another discovery
- * thread, just unregister the RPI.
+ /* A LOGO is completing and the node is in NPR state.
+ * If this is a fabric node that cleared its transport
+ * registration, release the rpi.
*/
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, ndlp);
} else {
/* Indicate the node has already released, should
@@ -4397,7 +4631,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->context1 = NULL;
}
}
-
+ out:
/*
* The driver received a LOGO from the rport and has ACK'd it.
* At this point, the driver is done so release the IOCB
@@ -4424,28 +4658,37 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ u32 mbx_flag = pmb->mbox_flag;
+ u32 mbx_cmd = pmb->u.mb.mbxCommand;
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi x%x DID:%x flg:%x %d x%px\n",
+ "0006 rpi x%x DID:%x flg:%x %d x%px "
+ "mbx_cmd x%x mbx_flag x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp);
- /* This is the end of the default RPI cleanup logic for
- * this ndlp and it could get released. Clear the nlp_flags to
- * prevent any further processing.
+ kref_read(&ndlp->kref), ndlp, mbx_cmd,
+ mbx_flag, pmb);
+
+ /* This ends the default/temporary RPI cleanup logic for this
+ * ndlp and the node and rpi need to be released. Free the rpi
+ * first on an UNREG_LOGIN and then release the final
+ * references.
*/
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
+ if (mbx_cmd == MBX_UNREG_LOGIN)
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ spin_unlock_irq(&ndlp->lock);
lpfc_nlp_put(ndlp);
- lpfc_nlp_not_used(ndlp);
+ lpfc_drop_node(ndlp->vport, ndlp);
}
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
return;
}
@@ -4503,11 +4746,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0110 ELS response tag x%x completes "
- "Data: x%x x%x x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi);
+ ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
if (mbox) {
if ((rspiocb->iocb.ulpStatus == 0) &&
(ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
@@ -4587,6 +4830,20 @@ out:
spin_unlock_irq(&ndlp->lock);
}
+ /* An SLI4 NPIV instance wants to drop the node at this point under
+ * these conditions and release the RPI.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (vport && vport->port_type == LPFC_NPIV_PORT) &&
+ ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_drop_node(vport, ndlp);
+ }
+
/* Release the originating I/O reference. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
@@ -4632,6 +4889,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
uint16_t cmdsize;
int rc;
ELS_PKT *els_pkt_ptr;
+ struct fc_els_rdf_resp *rdf_resp;
oldcmd = &oldiocb->iocb;
@@ -4743,6 +5001,29 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
"Issue ACC PRLO: did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
break;
+ case ELS_CMD_RDF:
+ cmdsize = sizeof(*rdf_resp);
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
+ ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ rdf_resp = (struct fc_els_rdf_resp *)pcmd;
+ memset(rdf_resp, 0, sizeof(*rdf_resp));
+ rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
+
+ /* FC-LS-5 specifies desc_list_len shall be set to 12 */
+ rdf_resp->desc_list_len = cpu_to_be32(12);
+
+ /* FC-LS-5 specifies LS REQ Information descriptor */
+ rdf_resp->lsri.desc_tag = cpu_to_be32(1);
+ rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
+ rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
+ break;
default:
return 1;
}
@@ -4775,10 +5056,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
"XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
+ "RPI: x%x, fc_flag x%x refcnt %d\n",
rc, elsiocb->iotag, elsiocb->sli4_xritag,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, vport->fc_flag);
+ ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
return 0;
}
@@ -4856,6 +5137,17 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
return 1;
}
+ /* The NPIV instance is rejecting this unsolicited ELS. Make sure the
+ * node's assigned RPI gets released, as this node will get
+ * freed.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_NPIV_PORT) {
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -8845,6 +9137,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* There are no replies, so no rjt codes */
break;
+ case ELS_CMD_RDF:
+ phba->fc_stat.elsRcvRDF++;
+ /* Accept RDF only from fabric controller */
+ if (did != Fabric_Cntl_DID) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1115 Received RDF from invalid DID "
+ "x%x\n", did);
+ rjt_err = LSRJT_PROTOCOL_ERR;
+ rjt_exp = LSEXP_NOTHING_MORE;
+ goto lsrjt;
+ }
+
+ lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -10208,3 +10514,312 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
lpfc_unreg_rpi(vport, ndlp);
}
+static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
+{
+ bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
+}
+
+static void
+lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
+{
+ u32 i;
+
+ if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
+ return;
+
+ for (i = min; i <= max; i++)
+ set_bit(i, vport->vmid_priority_range);
+}
+
+static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
+{
+ set_bit(ctcl_vmid, vport->vmid_priority_range);
+}
+
+u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
+{
+ u32 i;
+
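+ /* Hand out the lowest CS_CTL value still set in the priority bitmap;
+ * a return of 0 means the range is exhausted.
+ */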
+ i = find_first_bit(vport->vmid_priority_range,
+ LPFC_VMID_MAX_PRIORITY_RANGE);
+
+ if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
+ return 0;
+
+ clear_bit(i, vport->vmid_priority_range);
+ return i;
+}
+
+#define MAX_PRIORITY_DESC 255
+
+static void
+lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct priority_range_desc *desc;
+ struct lpfc_dmabuf *prsp = NULL;
+ struct lpfc_vmid_priority_range *vmid_range = NULL;
+ u32 *data;
+ struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
+ IOCB_t *irsp = &rspiocb->iocb;
+ u8 *pcmd, max_desc;
+ u32 len, i;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+
+ prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+
+ pcmd = prsp->virt;
+ data = (u32 *)pcmd;
+ if (data[0] == ELS_CMD_LS_RJT) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "3277 QFPA LS_RJT x%x x%x\n",
+ data[0], data[1]);
+ goto out;
+ }
+ if (irsp->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ "6529 QFPA failed with status x%x x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto out;
+ }
+
+ if (!vport->qfpa_res) {
+ max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
+ vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
+ GFP_KERNEL);
+ if (!vport->qfpa_res)
+ goto out;
+ }
+
+ len = *((u32 *)(pcmd + 4));
+ len = be32_to_cpu(len);
+ memcpy(vport->qfpa_res, pcmd, len + 8);
+ len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
+
+ desc = (struct priority_range_desc *)(pcmd + 8);
+ vmid_range = vport->vmid_priority.vmid_range;
+ if (!vmid_range) {
+ vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
+ GFP_KERNEL);
+ if (!vmid_range) {
+ kfree(vport->qfpa_res);
+ goto out;
+ }
+ vport->vmid_priority.vmid_range = vmid_range;
+ }
+ vport->vmid_priority.num_descriptors = len;
+
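+ /* Each descriptor carries a low/high CS_CTL range. The values are
+ * doubled and adjusted for odd/even local VE ids before being folded
+ * into the priority bitmap below.
+ */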
+ for (i = 0; i < len; i++, vmid_range++, desc++) {
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
+ "6539 vmid values low=%d, high=%d, qos=%d, "
+ "local ve id=%d\n", desc->lo_range,
+ desc->hi_range, desc->qos_priority,
+ desc->local_ve_id);
+
+ vmid_range->low = desc->lo_range << 1;
+ if (desc->local_ve_id == QFPA_ODD_ONLY)
+ vmid_range->low++;
+ if (desc->qos_priority)
+ vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
+ vmid_range->qos = desc->qos_priority;
+
+ vmid_range->high = desc->hi_range << 1;
+ if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
+ (desc->local_ve_id == QFPA_EVEN_ODD))
+ vmid_range->high++;
+ }
+ lpfc_init_cs_ctl_bitmap(vport);
+ for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
+ lpfc_vmid_set_cs_ctl_range(vport,
+ vport->vmid_priority.vmid_range[i].low,
+ vport->vmid_priority.vmid_range[i].high);
+ }
+
+ vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
+ out:
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+}
+
+int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *elsiocb;
+ u8 *pcmd;
+ int ret;
+
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENXIO;
+
+ elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
+ ndlp->nlp_DID, ELS_CMD_QFPA);
+ if (!elsiocb)
+ return -ENOMEM;
+
+ pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+ *((u32 *)(pcmd)) = ELS_CMD_QFPA;
+ pcmd += 4;
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa;
+
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ return -ENXIO;
+ }
+
+ ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
+ if (ret != IOCB_SUCCESS) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ return -EIO;
+ }
+ vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
+ return 0;
+}
+
+int
+lpfc_vmid_uvem(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid, bool instantiated)
+{
+ struct lpfc_vem_id_desc *vem_id_desc;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *elsiocb;
+ struct instantiated_ve_desc *inst_desc;
+ struct lpfc_vmid_context *vmid_context;
+ u8 *pcmd;
+ u32 *len;
+ int ret = 0;
+
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ return -ENXIO;
+
+ vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
+ if (!vmid_context)
+ return -ENOMEM;
+ elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
+ ndlp, Fabric_DID, ELS_CMD_UVEM);
+ if (!elsiocb)
+ goto out;
+
+ lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
+ "3427 Host vmid %s %d\n",
+ vmid->host_vmid, instantiated);
+ vmid_context->vmp = vmid;
+ vmid_context->nlp = ndlp;
+ vmid_context->instantiated = instantiated;
+ elsiocb->vmid_tag.vmid_context = vmid_context;
+ pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+
+ if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
+ memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ *((u32 *)(pcmd)) = ELS_CMD_UVEM;
+ len = (u32 *)(pcmd + 4);
+ *len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
+
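+ /* The UVEM payload carries two descriptors: the VEM identification
+ * descriptor at offset 8 and the (de)instantiated VE descriptor at
+ * offset 32.
+ */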
+ vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
+ vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
+ vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
+ memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
+ inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
+ inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
+ memcpy(inst_desc->global_vem_id, vmid->host_vmid,
+ LPFC_COMPRESS_VMID_SIZE);
+
+ bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
+ bf_set(lpfc_instantiated_local_id, inst_desc,
+ vmid->un.cs_ctl_vmid);
+ if (instantiated) {
+ inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
+ } else {
+ inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
+ lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
+ }
+ inst_desc->word6 = cpu_to_be32(inst_desc->word6);
+
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem;
+
+ elsiocb->context1 = lpfc_nlp_get(ndlp);
+ if (!elsiocb->context1) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ goto out;
+ }
+
+ ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
+ if (ret != IOCB_SUCCESS) {
+ lpfc_els_free_iocb(vport->phba, elsiocb);
+ lpfc_nlp_put(ndlp);
+ goto out;
+ }
+
+ return 0;
+ out:
+ kfree(vmid_context);
+ return -EIO;
+}
+
+static void
+lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = icmdiocb->vport;
+ struct lpfc_dmabuf *prsp = NULL;
+ struct lpfc_vmid_context *vmid_context =
+ icmdiocb->vmid_tag.vmid_context;
+ struct lpfc_nodelist *ndlp = icmdiocb->context1;
+ u8 *pcmd;
+ u32 *data;
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
+ struct lpfc_vmid *vmid;
+
+ vmid = vmid_context->vmp;
+ if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
+ ndlp = NULL;
+
+ prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
+ if (!prsp)
+ goto out;
+ pcmd = prsp->virt;
+ data = (u32 *)pcmd;
+ if (data[0] == ELS_CMD_LS_RJT) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
+ goto out;
+ }
+ if (irsp->ulpStatus) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "4533 UVEM error status %x: %x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
+ goto out;
+ }
+ spin_lock(&phba->hbalock);
+ /* Set IN USE flag */
+ vport->vmid_flag |= LPFC_VMID_IN_USE;
+ phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
+ spin_unlock(&phba->hbalock);
+
+ if (vmid_context->instantiated) {
+ write_lock(&vport->vmid_lock);
+ vmid->flag |= LPFC_VMID_REGISTERED;
+ vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ }
+
+ out:
+ kfree(vmid_context);
+ lpfc_els_free_iocb(phba, icmdiocb);
+ lpfc_nlp_put(ndlp);
+}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f5a898c2c904..7cc5920979f8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -72,14 +72,14 @@ static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
if (ndlp->nlp_fc4_type ||
- ndlp->nlp_DID == Fabric_DID ||
- ndlp->nlp_DID == NameServer_DID ||
- ndlp->nlp_DID == FDMI_DID)
+ ndlp->nlp_type & NLP_FABRIC)
return 1;
return 0;
}
@@ -237,6 +237,110 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
}
/**
+ * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
+ * @vport: Pointer to vport context object.
+ *
+ * This function checks for idle VMID entries related to a particular vport.
+ * Any entries found to be unused or idle are freed.
+ **/
+static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
+{
+ u16 keep;
+ u32 difftime = 0, r, bucket;
+ u64 *lta;
+ int cpu;
+ struct lpfc_vmid *vmp;
+
+ write_lock(&vport->vmid_lock);
+
+ if (!vport->cur_vmid_cnt)
+ goto out;
+
+ /* iterate through the table */
+ hash_for_each(vport->hash_table, bucket, vmp, hnode) {
+ keep = 0;
+ if (vmp->flag & LPFC_VMID_REGISTERED) {
+ /* check if the particular VMID is in use */
+ /* for all available per cpu variable */
+ for_each_possible_cpu(cpu) {
+ /* if last access time is less than timeout */
+ lta = per_cpu_ptr(vmp->last_io_time, cpu);
+ if (!lta)
+ continue;
+ difftime = (jiffies) - (*lta);
+ if ((vport->vmid_inactivity_timeout *
+ JIFFIES_PER_HR) > difftime) {
+ keep = 1;
+ break;
+ }
+ }
+
+ /* if none of the cpus have been used by the vm, */
+ /* remove the entry if already registered */
+ if (!keep) {
+ /* mark the entry for deregistration */
+ vmp->flag = LPFC_VMID_DE_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ if (vport->vmid_priority_tagging)
+ r = lpfc_vmid_uvem(vport, vmp, false);
+ else
+ r = lpfc_vmid_cmd(vport,
+ SLI_CTAS_DAPP_IDENT,
+ vmp);
+
+ /* decrement number of active vms and mark */
+ /* entry in slot as free */
+ write_lock(&vport->vmid_lock);
+ if (!r) {
+ struct lpfc_vmid *ht = vmp;
+
+ vport->cur_vmid_cnt--;
+ ht->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(ht->last_io_time);
+ ht->last_io_time = NULL;
+ hash_del(&ht->hnode);
+ }
+ }
+ }
+ }
+ out:
+ write_unlock(&vport->vmid_lock);
+}
+
+/**
+ * lpfc_check_inactive_vmid - VMID inactivity checker
+ * @phba: Pointer to hba context object.
+ *
+ * This function is called from the worker thread to determine if an entry in
+ * the VMID table can be released since there was no I/O activity seen from that
+ * particular VM for the specified time. When this happens, the entry in the
+ * table is released and the corresponding resources on the switch are cleared.
+ **/
+
+static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
+ return;
+
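+ /* Slot 0 of the work array may be empty; fall back to the physical
+ * port in that case, and stop at the first NULL entry otherwise.
+ */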
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
+
+ lpfc_check_inactive_vmid_one(vport);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
* lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
* @ndlp: Pointer to remote node object.
*
@@ -325,6 +429,32 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
return fcf_inuse;
}
+static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport;
+ struct lpfc_vport **vports;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (!vports)
+ return;
+
+ for (i = 0; i <= phba->max_vports; i++) {
+ if ((!vports[i]) && (i == 0))
+ vport = phba->pport;
+ else
+ vport = vports[i];
+ if (!vport)
+ break;
+
+ if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
+ if (!lpfc_issue_els_qfpa(vport))
+ vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
/**
* lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
* @phba: Pointer to hba context object.
@@ -645,6 +775,22 @@ lpfc_work_done(struct lpfc_hba *phba)
if (ha_copy & HA_LATT)
lpfc_handle_latt(phba);
+ /* Handle VMID Events */
+ if (lpfc_is_vmid_enabled(phba)) {
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_VMID_ISSUE_QFPA) {
+ lpfc_check_vmid_qfpa_issue(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_VMID_ISSUE_QFPA;
+ }
+ if (phba->pport->work_port_events &
+ WORKER_CHECK_INACTIVE_VMID) {
+ lpfc_check_inactive_vmid(phba);
+ phba->pport->work_port_events &=
+ ~WORKER_CHECK_INACTIVE_VMID;
+ }
+ }
+
/* Process SLI4 events */
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
if (phba->hba_flag & HBA_RRQ_ACTIVE)
@@ -826,7 +972,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
((vport->port_type == LPFC_NPIV_PORT) &&
((ndlp->nlp_DID == NameServer_DID) ||
- (ndlp->nlp_DID == FDMI_DID))))
+ (ndlp->nlp_DID == FDMI_DID) ||
+ (ndlp->nlp_DID == Fabric_Cntl_DID))))
lpfc_unreg_rpi(vport, ndlp);
/* Leave Fabric nodes alone on link down */
@@ -4160,6 +4307,53 @@ out:
return;
}
+/*
+ * This routine handles processing a Fabric Controller REG_LOGIN mailbox
+ * command upon completion. It is set up in the LPFC_MBOXQ
+ * as the completion routine when the command is handed off to the SLI layer.
+ */
+void
+lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ struct lpfc_vport *vport = pmb->vport;
+ MAILBOX_t *mb = &pmb->u.mb;
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ pmb->ctx_ndlp = NULL;
+ pmb->ctx_buf = NULL;
+
+ if (mb->mbxStatus) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0933 %s: Register FC login error: 0x%x\n",
+ __func__, mb->mbxStatus);
+ goto out;
+ }
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ ndlp->nlp_rpi = mb->un.varWords[0];
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
+ __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->nlp_state);
+
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ ndlp->nlp_type |= NLP_FABRIC;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+
+ out:
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Drop the reference count from the mbox at the end after
+ * all current uses of the ndlp have completed.
+ */
+ lpfc_nlp_put(ndlp);
+}
+
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4789,12 +4983,17 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
+ /* NLP_RELEASE_RPI is only set for SLI4 ports. */
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irq(&ndlp->lock);
}
+ spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ spin_unlock_irq(&ndlp->lock);
}
}
@@ -5129,8 +5328,10 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->dev_loss_evt.evt_listp);
list_del_init(&ndlp->recovery_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
+
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
+
return 0;
}
@@ -6176,8 +6377,23 @@ lpfc_nlp_release(struct kref *kref)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
lpfc_cleanup_node(vport, ndlp);
- /* Clear Node key fields to give other threads notice
- * that this node memory is not valid anymore.
+ /* Not all ELS transactions have registered the RPI with the port.
+ * In these cases the rpi usage is temporary and the node is
+ * released when the WQE is completed. Catch this case to free the
+ * RPI to the pool. Because this node is in the release path, a lock
+ * is unnecessary. All references are gone and the node has been
+ * dequeued.
+ */
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
+ !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
+ }
+
+ /* The node is not freed back to memory; it is released to a pool so
+ * the node fields need to be cleaned up.
*/
ndlp->vport = NULL;
ndlp->nlp_state = NLP_STE_FREED_NODE;
@@ -6257,6 +6473,7 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
"node not used: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
+
if (kref_read(&ndlp->kref) == 1)
if (lpfc_nlp_put(ndlp))
return 1;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 42682d95af52..4a5a85ed42ec 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -275,6 +275,7 @@ struct lpfc_sli_ct_request {
#define SLI_CT_ACCESS_DENIED 0x10
#define SLI_CT_INVALID_PORT_ID 0x11
#define SLI_CT_DATABASE_EMPTY 0x12
+#define SLI_CT_APP_ID_NOT_AVAILABLE 0x40
/*
* Name Server Command Codes
@@ -400,16 +401,16 @@ struct csp {
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t multicast:1; /* FC Word 1, bit 25 */
- uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */
- uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t priority_tagging:1; /* FC Word 1, bit 23 */
uint16_t simplex:1; /* FC Word 1, bit 22 */
uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
uint16_t dhd:1; /* FC Word 1, bit 18 */
uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
uint16_t payloadlength:1; /* FC Word 1, bit 16 */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint16_t broadcast:1; /* FC Word 1, bit 24 */
+ uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */
uint16_t multicast:1; /* FC Word 1, bit 25 */
uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
@@ -423,7 +424,7 @@ struct csp {
uint16_t dhd:1; /* FC Word 1, bit 18 */
uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */
uint16_t simplex:1; /* FC Word 1, bit 22 */
- uint16_t huntgroup:1; /* FC Word 1, bit 23 */
+ uint16_t priority_tagging:1; /* FC Word 1, bit 23 */
#endif
uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */
@@ -607,6 +608,8 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A000000
#define ELS_CMD_LCB 0x81000000
#define ELS_CMD_FPIN 0x16000000
+#define ELS_CMD_QFPA 0xB0000000
+#define ELS_CMD_UVEM 0xB1000000
#else /* __LITTLE_ENDIAN_BITFIELD */
#define ELS_CMD_MASK 0xffff
#define ELS_RSP_MASK 0xff
@@ -649,6 +652,8 @@ struct fc_vft_header {
#define ELS_CMD_LIRR 0x7A
#define ELS_CMD_LCB 0x81
#define ELS_CMD_FPIN ELS_FPIN
+#define ELS_CMD_QFPA 0xB0
+#define ELS_CMD_UVEM 0xB1
#endif
/*
@@ -1317,6 +1322,117 @@ struct fc_rdp_res_frame {
};
+/* UVEM */
+
+#define LPFC_UVEM_SIZE 60
+#define LPFC_UVEM_VEM_ID_DESC_SIZE 16
+#define LPFC_UVEM_VE_MAP_DESC_SIZE 20
+
+#define VEM_ID_DESC_TAG 0x0001000A
+struct lpfc_vem_id_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vem_id[16];
+};
+
+#define LPFC_QFPA_SIZE 4
+
+#define INSTANTIATED_VE_DESC_TAG 0x0001000B
+struct instantiated_ve_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t global_vem_id[16];
+ uint32_t word6;
+#define lpfc_instantiated_local_id_SHIFT 0
+#define lpfc_instantiated_local_id_MASK 0x000000ff
+#define lpfc_instantiated_local_id_WORD word6
+#define lpfc_instantiated_nport_id_SHIFT 8
+#define lpfc_instantiated_nport_id_MASK 0x00ffffff
+#define lpfc_instantiated_nport_id_WORD word6
+};
+
+#define DEINSTANTIATED_VE_DESC_TAG 0x0001000C
+struct deinstantiated_ve_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t global_vem_id[16];
+ uint32_t word6;
+#define lpfc_deinstantiated_nport_id_SHIFT 0
+#define lpfc_deinstantiated_nport_id_MASK 0x000000ff
+#define lpfc_deinstantiated_nport_id_WORD word6
+#define lpfc_deinstantiated_local_id_SHIFT 24
+#define lpfc_deinstantiated_local_id_MASK 0x00ffffff
+#define lpfc_deinstantiated_local_id_WORD word6
+};
+
+/* Query Fabric Priority Allocation Response */
+#define LPFC_PRIORITY_RANGE_DESC_SIZE 12
+
+struct priority_range_desc {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t lo_range;
+ uint8_t hi_range;
+ uint8_t qos_priority;
+ uint8_t local_ve_id;
+};
+
+struct fc_qfpa_res {
+ uint32_t reply_sequence; /* LS_ACC or LS_RJT */
+ uint32_t length; /* FC Word 1 */
+ struct priority_range_desc desc[1];
+};
+
+/* Application Server command code */
+/* VMID */
+
+#define SLI_CT_APP_SEV_Subtypes 0x20 /* Application Server subtype */
+
+#define SLI_CTAS_GAPPIA_ENT 0x0100 /* Get Application Identifier */
+#define SLI_CTAS_GALLAPPIA 0x0101 /* Get All Application Identifier */
+#define SLI_CTAS_GALLAPPIA_ID 0x0102 /* Get All Application Identifier */
+ /* for Nport */
+#define SLI_CTAS_GAPPIA_IDAPP 0x0103 /* Get Application Identifier */
+ /* for Nport */
+#define SLI_CTAS_RAPP_IDENT 0x0200 /* Register Application Identifier */
+#define SLI_CTAS_DAPP_IDENT 0x0300 /* Deregister Application */
+ /* Identifier */
+#define SLI_CTAS_DALLAPP_ID 0x0301 /* Deregister All Application */
+ /* Identifier */
+
+struct entity_id_object {
+ uint8_t entity_id_len;
+ uint8_t entity_id[255]; /* VM UUID */
+};
+
+struct app_id_object {
+ uint32_t port_id;
+ uint32_t app_id;
+ struct entity_id_object obj;
+};
+
+struct lpfc_vmid_rapp_ident_list {
+ uint32_t no_of_objects;
+ struct entity_id_object obj[1];
+};
+
+struct lpfc_vmid_dapp_ident_list {
+ uint32_t no_of_objects;
+ struct entity_id_object obj[1];
+};
+
+#define GALLAPPIA_ID_LAST 0x80
+struct lpfc_vmid_gallapp_ident_list {
+ uint8_t control;
+ uint8_t reserved[3];
+ struct app_id_object app_id;
+};
+
+#define RAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define DAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define GALLAPPIA_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4)
+#define DALLAPP_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4)
+
/******** FDMI ********/
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f77e71e6dbbd..eb8c735a243b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -273,6 +273,9 @@ struct lpfc_sli4_flags {
#define lpfc_vfi_rsrc_rdy_MASK 0x00000001
#define lpfc_vfi_rsrc_rdy_WORD word0
#define LPFC_VFI_RSRC_RDY 1
+#define lpfc_ftr_ashdr_SHIFT 4
+#define lpfc_ftr_ashdr_MASK 0x00000001
+#define lpfc_ftr_ashdr_WORD word0
};
struct sli4_bls_rsp {
@@ -2944,6 +2947,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2
+#define lpfc_mbx_rq_ftr_rq_ashdr_SHIFT 17
+#define lpfc_mbx_rq_ftr_rq_ashdr_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_ashdr_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -2975,6 +2981,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16
#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_ashdr_SHIFT 17
+#define lpfc_mbx_rq_ftr_rsp_ashdr_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ashdr_WORD word3
};
struct lpfc_mbx_memory_dump_type3 {
@@ -4219,6 +4228,9 @@ struct wqe_common {
#define wqe_xchg_WORD word10
#define LPFC_SCSI_XCHG 0x0
#define LPFC_NVME_XCHG 0x1
+#define wqe_appid_SHIFT 5
+#define wqe_appid_MASK 0x00000001
+#define wqe_appid_WORD word10
#define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5f018d02bf56..f3032e30c3e4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -98,6 +98,7 @@ static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
+static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
/**
* lpfc_config_port_prep - Perform lpfc initialization prior to config port
@@ -2888,6 +2889,10 @@ lpfc_cleanup(struct lpfc_vport *vport)
if (phba->link_state > LPFC_LINK_DOWN)
lpfc_port_link_failure(vport);
+ /* Clean up VMID resources */
+ if (lpfc_is_vmid_enabled(phba))
+ lpfc_vmid_vport_cleanup(vport);
+
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (vport->port_type != LPFC_PHYSICAL_PORT &&
ndlp->nlp_DID == Fabric_DID) {
@@ -3532,13 +3537,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* Driver must assume RPI is invalid for
- * any unused or inactive node.
- */
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- continue;
- }
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
@@ -4315,6 +4313,55 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
}
/**
+ * lpfc_vmid_res_alloc - Allocates resources for VMID
+ * @phba: pointer to lpfc hba data structure.
+ * @vport: pointer to vport data structure
+ *
+ * This routine allocates the resources needed for VMID support.
+ *
+ * Return codes
+ * 0 on Success
+ * Non-0 on Failure
+ */
+static int
+lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
+{
+ /* VMID feature is supported only on SLI4 */
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ phba->cfg_vmid_app_header = 0;
+ phba->cfg_vmid_priority_tagging = 0;
+ }
+
+ if (lpfc_is_vmid_enabled(phba)) {
+ vport->vmid =
+ kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
+ GFP_KERNEL);
+ if (!vport->vmid)
+ return -ENOMEM;
+
+ rwlock_init(&vport->vmid_lock);
+
+ /* Set the VMID parameters for the vport */
+ vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
+ vport->vmid_inactivity_timeout =
+ phba->cfg_vmid_inactivity_timeout;
+ vport->max_vmid = phba->cfg_max_vmid;
+ vport->cur_vmid_cnt = 0;
+
+ vport->vmid_priority_range = bitmap_zalloc
+ (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
+
+ if (!vport->vmid_priority_range) {
+ kfree(vport->vmid);
+ return -ENOMEM;
+ }
+
+ hash_init(vport->hash_table);
+ }
+ return 0;
+}
+
+/**
* lpfc_create_port - Create an FC port
* @phba: pointer to lpfc hba data structure.
* @instance: a unique integer ID to this FC port.
@@ -4466,6 +4513,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->port_type, shost->sg_tablesize,
phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
+ /* Allocate the resources for VMID */
+ rc = lpfc_vmid_res_alloc(phba, vport);
+
+ if (rc)
+ goto out;
+
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
@@ -4490,6 +4543,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
return vport;
out_put_shost:
+ kfree(vport->vmid);
+ bitmap_free(vport->vmid_priority_range);
scsi_host_put(shost);
out:
return NULL;
@@ -4789,6 +4844,42 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
}
/**
+ * lpfc_vmid_poll - VMID timeout detection
+ * @t: timer context used to derive the lpfc_hba pointer.
+ *
+ * This routine is invoked when a VM has issued no I/O for the specified
+ * amount of time. When this situation is detected, the VMID has to be
+ * deregistered from the switch and all the local resources freed. The VMID
+ * will be reassigned to the VM once the I/O begins.
+ **/
+static void
+lpfc_vmid_poll(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
+ u32 wake_up = 0;
+
+ /* check if there is a need to issue QFPA */
+ if (phba->pport->vmid_priority_tagging) {
+ wake_up = 1;
+ phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ }
+
+ /* Is the VMID inactivity timer enabled? */
+ if (phba->pport->vmid_inactivity_timeout ||
+ phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
+ wake_up = 1;
+ phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
+ }
+
+ if (wake_up)
+ lpfc_worker_wake_up(phba);
+
+ /* restart the timer for the next iteration */
+ mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
+ LPFC_VMID_TIMER));
+}
+
+/**
* lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
* @phba: pointer to lpfc hba data structure.
* @acqe_link: pointer to the async link completion queue entry.
@@ -6636,6 +6727,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
+ /* for VMID idle timeout if VMID is enabled */
+ if (lpfc_is_vmid_enabled(phba))
+ timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
+
/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
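The lpfc_vmid_poll() handler added above relies on the standard timer_setup()/from_timer() pattern to recover the owning lpfc_hba from the timer pointer and then re-arm itself. A small stand-alone sketch of that pattern follows; the structure and function names are hypothetical and only illustrate the mechanism, they are not lpfc code.

/* Sketch: periodic timer using timer_setup()/from_timer(). */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_ctx {
	struct timer_list poll_timer;
	unsigned long interval_ms;
};

static void demo_poll(struct timer_list *t)
{
	/* Recover the containing structure from the timer pointer. */
	struct demo_ctx *ctx = from_timer(ctx, t, poll_timer);

	/* ... do the periodic work, then re-arm for the next interval. */
	mod_timer(&ctx->poll_timer,
		  jiffies + msecs_to_jiffies(ctx->interval_ms));
}

static void demo_start(struct demo_ctx *ctx)
{
	timer_setup(&ctx->poll_timer, demo_poll, 0);
	mod_timer(&ctx->poll_timer,
		  jiffies + msecs_to_jiffies(ctx->interval_ms));
}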
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1b40a3bbd1cd..84bc373190d8 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -522,7 +522,8 @@ lpfc_init_link(struct lpfc_hba * phba,
}
/* Enable asynchronous ABTS responses from firmware */
- mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
+ if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp)
+ mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
/* NEW_FEATURE
* Setting up the link speed
@@ -2100,6 +2101,12 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
}
+
+ /* Enable Application Services Header for appheader VMID */
+ if (phba->cfg_vmid_app_header) {
+ bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1);
+ }
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bb4e65a32ecc..e12f83fb795c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -567,15 +567,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* no deferred ACC */
kfree(save_iocb);
- /* In order to preserve RPIs, we want to cleanup
- * the default RPI the firmware created to rcv
- * this ELS request. The only way to do this is
- * to register, then unregister the RPI.
+ /* This is an NPIV SLI4 instance that does not need to register
+ * a default RPI.
*/
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
- NLP_RCV_PLOGI);
- spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ login_mbox = NULL;
+ } else {
+ /* In order to preserve RPIs, we want to cleanup
+ * the default RPI the firmware created to rcv
+ * this ELS request. The only way to do this is
+ * to register, then unregister the RPI.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
+ NLP_RCV_PLOGI);
+ spin_unlock_irq(&ndlp->lock);
+ }
+
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
@@ -653,6 +662,10 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
ndlp, NULL);
}
+
+ /* This nlp_put pairs with lpfc_sli4_resume_rpi */
+ lpfc_nlp_put(ndlp);
+
kfree(elsiocb);
mempool_free(mboxq, phba->mbox_mem_pool);
}
@@ -772,6 +785,15 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+ /* This clause allows the initiator to ACC the LOGO back to the
+ * Fabric Domain Controller. It does deliberately skip all other
+ * steps because some fabrics send RDP requests after logging out
+ * from the initiator.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC &&
+ ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
+ return 0;
+
/* Notify transport of connectivity loss to trigger cleanup. */
if (phba->nvmet_support &&
ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
@@ -1410,6 +1432,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
switch (ndlp->nlp_DID) {
case NameServer_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
+ /* Fabric Controller Node needs these parameters. */
+ memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm));
break;
case FDMI_DID:
mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 41e49f61fac2..bcc804cefd30 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1049,9 +1049,19 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->transferred_length = wcqe->total_data_placed;
nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0;
- /* Sanity check */
- if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
+
+ /* Check if this is really an ERSP */
+ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ lpfc_ncmd->result = 0;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+ "6084 NVME Completion ERSP: "
+ "xri %x placed x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ wcqe->total_data_placed);
break;
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x "
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index eefbb9b22798..1b248c237be1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -28,6 +28,7 @@
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
+#include <linux/blk-cgroup.h>
#include <net/checksum.h>
#include <scsi/scsi.h>
@@ -86,6 +87,14 @@ static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp);
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
+ *cmd, struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag);
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid);
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
@@ -518,6 +527,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
int rrq_empty = 0;
struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
+ struct scsi_cmnd *cmd;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
@@ -553,6 +563,31 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
psb->cur_iocbq.sli4_lxritag, rxid, 1);
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
}
+
+ if (phba->cfg_fcp_wait_abts_rsp) {
+ spin_lock_irqsave(&psb->buf_lock, iflag);
+ cmd = psb->pCmd;
+ psb->pCmd = NULL;
+ spin_unlock_irqrestore(&psb->buf_lock, iflag);
+
+ /* The sdev is not guaranteed to be valid post
+ * scsi_done upcall.
+ */
+ if (cmd)
+ cmd->scsi_done(cmd);
+
+ /*
+ * We expect there is an abort thread waiting
+ * for command completion; wake up the thread.
+ */
+ spin_lock_irqsave(&psb->buf_lock, iflag);
+ psb->cur_iocbq.iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ if (psb->waitq)
+ wake_up(psb->waitq);
+ spin_unlock_irqrestore(&psb->buf_lock, iflag);
+ }
+
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
@@ -780,7 +815,8 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
qp = psb->hdwq;
if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
- psb->pCmd = NULL;
+ if (!phba->cfg_fcp_wait_abts_rsp)
+ psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
@@ -2869,10 +2905,8 @@ skipit:
}
out:
if (err_type == BGS_GUARD_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
@@ -2880,10 +2914,8 @@ out:
sum, guard_tag);
} else if (err_type == BGS_REFTAG_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2892,10 +2924,8 @@ out:
ref_tag, start_ref_tag);
} else if (err_type == BGS_APPTAG_ERR_MASK) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2954,10 +2984,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9059 BLKGRD: Guard Tag error in cmd"
@@ -2970,10 +2998,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2987,10 +3013,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3100,10 +3124,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9055 BLKGRD: Guard Tag error in cmd "
@@ -3116,10 +3138,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -3133,10 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
+ set_host_byte(cmd, DID_ABORT);
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -4045,6 +4063,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
u32 logit = LOG_FCP;
u32 status, idx;
unsigned long iflags = 0;
+ u8 wait_xb_clr = 0;
/* Sanity check on return of outstanding command */
if (!lpfc_cmd) {
@@ -4096,8 +4115,11 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
- if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ if (phba->cfg_fcp_wait_abts_rsp)
+ wait_xb_clr = 1;
+ }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -4329,6 +4351,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_io_ktime(phba, lpfc_cmd);
}
#endif
+ if (wait_xb_clr)
+ goto out;
lpfc_cmd->pCmd = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4343,8 +4367,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
+out:
spin_unlock(&lpfc_cmd->buf_lock);
-
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
@@ -4398,11 +4422,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
- /* pick up SLI4 exhange busy status from HBA */
+ /* pick up SLI4 exchange busy status from HBA */
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
- else
- lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -4601,6 +4624,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_io_ktime(phba, lpfc_cmd);
}
#endif
+
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
@@ -5145,6 +5169,269 @@ void lpfc_poll_timeout(struct timer_list *t)
}
}
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Return the VMID entry associated with the UUID
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ u32 hash, u8 *buf)
+{
+ struct lpfc_vmid *vmp;
+
+ hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+ if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+ return vmp;
+ }
+ return NULL;
+}
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp)
+{
+ hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+ int c;
+ int hash = 0;
+
+ if (len == 0)
+ return 0;
+ while (len--) {
+ c = *vmid++;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+
+ hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+ (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+ }
+
+ return hash & LPFC_VMID_HASH_MASK;
+}
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @cmd: address of scsi cmd descriptor
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
+ *cmd, struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag)
+{
+ u64 *lta;
+
+ if (vport->vmid_priority_tagging)
+ tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+ else
+ tag->app_id = vmp->un.app_id;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ vmp->io_wr_cnt++;
+ else
+ vmp->io_rd_cnt++;
+
+ /* update the last access timestamp in the table */
+ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+ *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid)
+{
+ u32 hash;
+ struct lpfc_vmid *pvmid;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ } else {
+ hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+ pvmid =
+ lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+ vmid->host_vmid);
+ if (pvmid)
+ vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+ else
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @cmd: address of scsi_cmd descriptor
+ * @tag: VMID tag
+ * Returns status of the function
+ */
+static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
+ scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc, i;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
+ LPFC_VMID_QFPA_CMPL)) {
+ vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ return -EAGAIN;
+ }
+
+ /* search if the UUID has already been mapped to the VMID */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* if found, check if it's already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ read_unlock(&vport->vmid_lock);
+ lpfc_vmid_update_entry(vport, cmd, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* else if register or dereg request has already been sent */
+ /* Hence VMID tag will not be added for this I/O */
+ read_unlock(&vport->vmid_lock);
+ rc = -EBUSY;
+ } else {
+ /* The VMID was not found in the hashtable. At this point, */
+ /* drop the read lock first before proceeding further */
+ read_unlock(&vport->vmid_lock);
+ /* start the process to obtain one as per the */
+ /* type of the VMID indicated */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* while the read lock was released, in case the entry was */
+ /* added by another context or is in the process of being added */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, cmd, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return -EBUSY;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ if (i == vport->max_vmid)
+ vmp = NULL;
+ } else {
+ vmp = NULL;
+ }
+
+ if (!vmp) {
+ write_unlock(&vport->vmid_lock);
+ return -ENOMEM;
+ }
+
+ /* Add the vmid and register */
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+ vmp->vmid_len = len;
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available VMID */
+ if (lpfc_vmid_is_type_priority_tag(vport))
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per cpu variable for holding */
+ /* the last access time stamp only if VMID is enabled */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = __alloc_percpu(sizeof(u64),
+ __alignof__(struct
+ lpfc_vmid));
+ if (!vmp->last_io_time) {
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ write_unlock(&vport->vmid_lock);
+
+ /* complete transaction with switch */
+ if (lpfc_vmid_is_type_priority_tag(vport))
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ } else {
+ write_lock(&vport->vmid_lock);
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(vmp->last_io_time);
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
+
+/*
+ * lpfc_is_command_vm_io - get the UUID from blk cgroup
+ * @cmd: Pointer to scsi_cmnd data structure
+ * Returns UUID if present, otherwise NULL
+ */
+static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
+{
+ char *uuid = NULL;
+
+ if (cmd->request) {
+ if (cmd->request->bio)
+ uuid = blkcg_get_fc_appid(cmd->request->bio);
+ }
+ return uuid;
+}
+
/**
* lpfc_queuecommand - scsi_host_template queuecommand entry point
* @shost: kernel scsi host pointer.
@@ -5168,6 +5455,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct lpfc_io_buf *lpfc_cmd;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
+ u8 *uuid = NULL;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0L;
@@ -5297,6 +5585,25 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* if the I/O was generated by a VM, get the associated virtual */
+ /* entity id */
+ uuid = lpfc_is_command_vm_io(cmnd);
+
+ if (uuid) {
+ err = lpfc_vmid_get_appid(vport, uuid, cmnd,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_cmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
+ }
+ }
+
+ atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
@@ -5384,6 +5691,31 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
return 0;
}
+/*
+ * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
+ * @vport: The virtual port for which this call is being executed.
+ */
+void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
+{
+ u32 bucket;
+ struct lpfc_vmid *cur;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ del_timer_sync(&vport->phba->inactive_vmid_poll);
+
+ kfree(vport->qfpa_res);
+ kfree(vport->vmid_priority.vmid_range);
+ kfree(vport->vmid);
+
+ if (!hash_empty(vport->hash_table))
+ hash_for_each(vport->hash_table, bucket, cur, hnode)
+ hash_del(&cur->hnode);
+
+ vport->qfpa_res = NULL;
+ vport->vmid_priority.vmid_range = NULL;
+ vport->vmid = NULL;
+ vport->cur_vmid_cnt = 0;
+}
/**
* lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
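The lpfc_vmid_hash_fn() routine added to lpfc_scsi.c above maps a VM UUID string to a hash bucket with a case-folding rolling hash. The user-space sketch below reproduces that computation so its bucket mapping can be tried outside the driver; the LPFC_VMID_HASH_SHIFT/MASK values used here are assumed for illustration, as the driver's actual constants live in its headers.

#include <stdio.h>
#include <string.h>

/* Assumed illustrative values; the real constants are defined by the driver. */
#define VMID_HASH_SHIFT 5
#define VMID_HASH_MASK  0xff

/* Same case-folding rolling hash as lpfc_vmid_hash_fn() in the patch. */
static int vmid_hash(const char *vmid, int len)
{
	int c, hash = 0;

	if (len == 0)
		return 0;
	while (len--) {
		c = *vmid++;
		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';
		hash = (hash + (c << VMID_HASH_SHIFT) +
			(c >> VMID_HASH_SHIFT)) * 19;
	}
	return hash & VMID_HASH_MASK;
}

int main(void)
{
	const char *uuid = "A1B2C3D4-0000-0000-0000-000000000001";

	printf("bucket=%d\n", vmid_hash(uuid, (int)strlen(uuid)));
	return 0;
}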
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fc3682f15f50..f530d8fe7a8c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -2679,6 +2679,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
}
+ /* This nlp_put pairs with lpfc_sli4_resume_rpi */
+ if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
+ ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ lpfc_nlp_put(ndlp);
+ }
+
/* Check security permission status on INIT_LINK mailbox command */
if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
(pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
@@ -2749,7 +2755,6 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
-
lpfc_nlp_put(ndlp);
}
}
@@ -7694,6 +7699,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_free_mbox;
}
+ /* Disable VMID if app header is not supported */
+ if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
+ &mqe->un.req_ftrs))) {
+ bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
+ phba->cfg_vmid_app_header = 0;
+ lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
+ "1242 vmid feature not supported\n");
+ }
+
/*
* The port must support FCP initiator mode as this is the
* only mode running in the host.
@@ -7959,7 +7973,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"0393 Error %d during rpi post operation\n",
rc);
rc = -ENODEV;
- goto out_destroy_queue;
+ goto out_free_iocblist;
}
lpfc_sli4_node_prep(phba);
@@ -8125,8 +8139,9 @@ out_io_buff_free:
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
lpfc_sli4_queue_unset(phba);
-out_destroy_queue:
+out_free_iocblist:
lpfc_free_iocb_list(phba);
+out_destroy_queue:
lpfc_sli4_queue_destroy(phba);
out_stop_timers:
lpfc_stop_hba_timers(phba);
@@ -9751,6 +9766,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
*pcmd == ELS_CMD_RSCN_XMT ||
*pcmd == ELS_CMD_FDISC ||
*pcmd == ELS_CMD_LOGO ||
+ *pcmd == ELS_CMD_QFPA ||
+ *pcmd == ELS_CMD_UVEM ||
*pcmd == ELS_CMD_PLOGI)) {
bf_set(els_req64_sp, &wqe->els_req, 1);
bf_set(els_req64_sid, &wqe->els_req,
@@ -10313,6 +10330,18 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
}
+ /* add the VMID tags as per switch response */
+ if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_priority_tagging) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (piocb->vmid_tag.cs_ctl_vmid));
+ } else {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ wqe->words[31] = piocb->vmid_tag.app_id;
+ }
+ }
rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
return rc;
}
@@ -13625,9 +13654,15 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- /* Reg_LOGIN of dflt RPI was successful. Now lets get
- * RID of the PPI using the same mbox buffer.
+
+ /* Reg_LOGIN of dflt RPI was successful. Mark the
+ * node as having an UNREG_LOGIN in progress to stop
+ * an unsolicited PLOGI from the same NPortId from
+ * starting another mailbox transaction.
*/
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ ndlp->nlp_flag |= NLP_UNREG_INP;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
lpfc_unreg_login(phba, vport->vpi,
pmbox->un.varWords[0], pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
@@ -17943,7 +17978,6 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
seq_dmabuf->time_stamp = jiffies;
lpfc_update_rcv_time_stamp(vport);
if (list_empty(&seq_dmabuf->dbuf.list)) {
- temp_hdr = dmabuf->hbuf.virt;
list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
return seq_dmabuf;
}
@@ -19032,14 +19066,28 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
if (!mboxq)
return -ENOMEM;
+ /* If cmpl assigned, then this nlp_get pairs with
+ * lpfc_mbx_cmpl_resume_rpi.
+ *
+ * Else cmpl is NULL, then this nlp_get pairs with
+ * lpfc_sli_def_mbox_cmpl.
+ */
+ if (!lpfc_nlp_get(ndlp)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2122 %s: Failed to get nlp ref\n",
+ __func__);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
/* Post all rpi memory regions to the port. */
lpfc_resume_rpi(mboxq, ndlp);
if (cmpl) {
mboxq->mbox_cmpl = cmpl;
mboxq->ctx_buf = arg;
- mboxq->ctx_ndlp = ndlp;
} else
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mboxq->ctx_ndlp = ndlp;
mboxq->vport = ndlp->vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
@@ -19047,6 +19095,7 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
"2010 Resume RPI Mailbox failed "
"status %d, mbxStatus x%x\n", rc,
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ lpfc_nlp_put(ndlp);
mempool_free(mboxq, phba->mbox_mem_pool);
return -EIO;
}
@@ -20136,8 +20185,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
(mb->u.mb.mbxCommand != MBX_REG_VPI))
continue;
- list_del(&mb->list);
- list_add_tail(&mb->list, &mbox_cmd_list);
+ list_move_tail(&mb->list, &mbox_cmd_list);
}
/* Clean up active mailbox command with the vport */
mb = phba->sli.mbox_active;
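The lpfc_cleanup_pending_mbox() change above replaces an open-coded list_del() plus list_add_tail() pair with list_move_tail(). The two forms are equivalent; list_move_tail() is simply the fused helper from <linux/list.h>, as the short sketch below illustrates (function names here are hypothetical).

#include <linux/list.h>

/* Before: two explicit steps to move an entry to the tail of dest. */
static void move_entry(struct list_head *entry, struct list_head *dest)
{
	list_del(entry);
	list_add_tail(entry, dest);
}

/* After: one call, same result. */
static void move_entry_fused(struct list_head *entry, struct list_head *dest)
{
	list_move_tail(entry, dest);
}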
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 4f6936014ff5..dde8eb9d796d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,6 +35,12 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
+union lpfc_vmid_iocb_tag {
+ uint32_t app_id;
+ uint8_t cs_ctl_vmid;
+ struct lpfc_vmid_context *vmid_context; /* UVEM context information */
+};
+
struct lpfc_cq_event {
struct list_head list;
uint16_t hdwq;
@@ -100,12 +106,14 @@ struct lpfc_iocbq {
#define LPFC_IO_NVME 0x200000 /* NVME FCP command */
#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */
#define LPFC_IO_NVMET 0x800000 /* NVMET command */
+#define LPFC_IO_VMID 0x1000000 /* VMID tagged IO */
uint32_t drvrTimeout; /* driver timeout in seconds */
struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
void *context3; /* caller context information */
+ uint32_t event_tag; /* LA Event tag */
union {
wait_queue_head_t *wait_queue;
struct lpfc_iocbq *rsp_iocb;
@@ -114,6 +122,7 @@ struct lpfc_iocbq {
struct lpfc_node_rrq *rrq;
} context_un;
+ union lpfc_vmid_iocb_tag vmid_tag;
void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4b8e89375644..2d62fd2a9824 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.9"
+#define LPFC_DRIVER_VERSION "12.8.0.10"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 80f546976c7e..56910e94dbf2 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1583,9 +1583,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
memcpy(cmd->sense_buffer, pthru->reqsensearea,
14);
- cmd->result = (DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (CHECK_CONDITION << 1);
+ cmd->result = SAM_STAT_CHECK_CONDITION;
}
else {
if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
@@ -1593,14 +1591,10 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
memcpy(cmd->sense_buffer,
epthru->reqsensearea, 14);
- cmd->result = (DRIVER_SENSE << 24) |
- (DID_OK << 16) |
- (CHECK_CONDITION << 1);
- } else {
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = ABORTED_COMMAND;
- cmd->result |= (CHECK_CONDITION << 1);
- }
+ cmd->result = SAM_STAT_CHECK_CONDITION;
+ } else
+ scsi_build_sense(cmd, 0,
+ ABORTED_COMMAND, 0, 0);
}
break;
@@ -1617,7 +1611,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
*/
if( cmd->cmnd[0] == TEST_UNIT_READY ) {
cmd->result |= (DID_ERROR << 16) |
- (RESERVATION_CONFLICT << 1);
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
/*
@@ -1629,7 +1623,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
cmd->cmnd[0] == RELEASE) ) {
cmd->result |= (DID_ERROR << 16) |
- (RESERVATION_CONFLICT << 1);
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
#endif
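The megaraid conversions above drop the hand-rolled sense-buffer writes in favour of scsi_build_sense(). For context, the fixed-format sense bytes the old code was poking follow the SPC layout sketched below; this is a stand-alone illustration of that layout, not the SCSI midlayer implementation.

#include <stdint.h>
#include <string.h>
#include <stddef.h>

/* Fixed-format (0x70) sense data: byte 2 holds the sense key, bytes 12/13
 * hold ASC/ASCQ. This mirrors what the removed open-coded assignments
 * (sense_buffer[0] = 0x70, sense_buffer[2] = key, ...) were doing.
 */
static void build_fixed_sense(uint8_t *buf, size_t len,
			      uint8_t key, uint8_t asc, uint8_t ascq)
{
	memset(buf, 0, len);
	buf[0] = 0x70;		/* current error, fixed format */
	buf[2] = key & 0x0f;	/* sense key, e.g. ILLEGAL_REQUEST (0x5) */
	buf[7] = 0x0a;		/* additional sense length */
	buf[12] = asc;
	buf[13] = ascq;
}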
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 145fde302d7d..d3fac99db786 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1574,10 +1574,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
}
if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = ILLEGAL_REQUEST;
- scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
- scp->result = CHECK_CONDITION << 1;
+ scsi_build_sense(scp, 0, ILLEGAL_REQUEST,
+ MEGA_INVALID_FIELD_IN_CDB, 0);
return NULL;
}
@@ -2301,8 +2299,7 @@ megaraid_mbox_dpc(unsigned long devp)
memcpy(scp->sense_buffer, pthru->reqsensearea,
14);
- scp->result = DRIVER_SENSE << 24 |
- DID_OK << 16 | CHECK_CONDITION << 1;
+ scp->result = SAM_STAT_CHECK_CONDITION;
}
else {
if (mbox->cmd == MBOXCMD_EXTPTHRU) {
@@ -2310,14 +2307,10 @@ megaraid_mbox_dpc(unsigned long devp)
memcpy(scp->sense_buffer,
epthru->reqsensearea, 14);
- scp->result = DRIVER_SENSE << 24 |
- DID_OK << 16 |
- CHECK_CONDITION << 1;
- } else {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = ABORTED_COMMAND;
- scp->result = CHECK_CONDITION << 1;
- }
+ scp->result = SAM_STAT_CHECK_CONDITION;
+ } else
+ scsi_build_sense(scp, 0,
+ ABORTED_COMMAND, 0, 0);
}
break;
@@ -2334,7 +2327,7 @@ megaraid_mbox_dpc(unsigned long devp)
*/
if (scp->cmnd[0] == TEST_UNIT_READY) {
scp->result = DID_ERROR << 16 |
- RESERVATION_CONFLICT << 1;
+ SAM_STAT_RESERVATION_CONFLICT;
}
else
/*
@@ -2345,7 +2338,7 @@ megaraid_mbox_dpc(unsigned long devp)
scp->cmnd[0] == RELEASE)) {
scp->result = DID_ERROR << 16 |
- RESERVATION_CONFLICT << 1;
+ SAM_STAT_RESERVATION_CONFLICT;
}
else {
scp->result = DID_BAD_TARGET << 16 | status;
@@ -3240,8 +3233,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
int i;
uint32_t dword;
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = 0xFF;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index b5a765b73c76..7af2c23652b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.714.04.00-rc1"
-#define MEGASAS_RELDATE "Apr 14, 2020"
+#define MEGASAS_VERSION "07.717.02.00-rc1"
+#define MEGASAS_RELDATE "May 19, 2021"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -2262,6 +2262,15 @@ enum MR_PERF_MODE {
(mode) == MR_LATENCY_PERF_MODE ? "Latency" : \
"Unknown")
+enum MEGASAS_LD_TARGET_ID_STATUS {
+ LD_TARGET_ID_INITIAL,
+ LD_TARGET_ID_ACTIVE,
+ LD_TARGET_ID_DELETED,
+};
+
+#define MEGASAS_TARGET_ID(sdev) \
+ (((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id)
+
struct megasas_instance {
unsigned int *reply_map;
@@ -2326,6 +2335,9 @@ struct megasas_instance {
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
u8 ld_ids[MEGASAS_MAX_LD_IDS];
+ u8 ld_tgtid_status[MEGASAS_MAX_LD_IDS];
+ u8 ld_ids_prev[MEGASAS_MAX_LD_IDS];
+ u8 ld_ids_from_raidmap[MEGASAS_MAX_LD_IDS];
s8 init_id;
u16 max_num_sge;
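A quick illustration of the MEGASAS_TARGET_ID() mapping introduced above: logical devices sit on two channels, so the macro folds (channel % 2) and the SCSI id into a single LD target index. The MEGASAS_MAX_DEV_PER_CHANNEL value of 128 is assumed here for the example; the real constant is defined in megaraid_sas.h.

#include <stdio.h>

/* Assumed value for the example; the real constant is in megaraid_sas.h. */
#define MEGASAS_MAX_DEV_PER_CHANNEL 128

/* Mirrors MEGASAS_TARGET_ID(sdev). */
static unsigned int megasas_target_id(unsigned int channel, unsigned int id)
{
	return (channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + id;
}

int main(void)
{
	printf("ch=2 id=5 -> %u\n", megasas_target_id(2, 5));	/* 5   */
	printf("ch=3 id=5 -> %u\n", megasas_target_id(3, 5));	/* 133 */
	return 0;
}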
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 4d4e9dbe5193..ec10b2497310 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -141,6 +141,8 @@ static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
struct scsi_device *sdev);
+static void
+megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
/*
* PCI ID table for all supported controllers
@@ -213,7 +215,7 @@ static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;
/* define lock for aen poll */
-static spinlock_t poll_aen_lock;
+static DEFINE_SPINLOCK(poll_aen_lock);
extern struct dentry *megasas_debugfs_root;
extern int megasas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
@@ -436,6 +438,12 @@ megasas_decode_evt(struct megasas_instance *instance)
(class_locale.members.locale),
format_class(class_locale.members.class),
evt_detail->description);
+
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "evt_detail.args.ld.target_id/index %d/%d\n",
+ evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
+
}
/*
@@ -1779,6 +1787,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
struct MR_PRIV_DEVICE *mr_device_priv_data;
+ u32 ld_tgt_id;
instance = (struct megasas_instance *)
scmd->device->host->hostdata;
@@ -1805,17 +1814,21 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
}
}
- if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ mr_device_priv_data = scmd->device->hostdata;
+ if (!mr_device_priv_data ||
+ (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
- mr_device_priv_data = scmd->device->hostdata;
- if (!mr_device_priv_data) {
- scmd->result = DID_NO_CONNECT << 16;
- scmd->scsi_done(scmd);
- return 0;
+ if (MEGASAS_IS_LOGICAL(scmd->device)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
+ if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
}
if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
@@ -2095,7 +2108,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
static int megasas_slave_alloc(struct scsi_device *sdev)
{
- u16 pd_index = 0;
+ u16 pd_index = 0, ld_tgt_id;
struct megasas_instance *instance ;
struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -2120,6 +2133,14 @@ scan_target:
GFP_KERNEL);
if (!mr_device_priv_data)
return -ENOMEM;
+
+ if (MEGASAS_IS_LOGICAL(sdev)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(sdev);
+ instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
+ }
+
sdev->hostdata = mr_device_priv_data;
atomic_set(&mr_device_priv_data->r1_ldio_hint,
@@ -2129,6 +2150,19 @@ scan_target:
static void megasas_slave_destroy(struct scsi_device *sdev)
{
+ u16 ld_tgt_id;
+ struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ if (MEGASAS_IS_LOGICAL(sdev)) {
+ ld_tgt_id = MEGASAS_TARGET_ID(sdev);
+ instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ sdev_printk(KERN_INFO, sdev,
+ "LD target ID %d removed from OS stack\n", ld_tgt_id);
+ }
+
kfree(sdev->hostdata);
sdev->hostdata = NULL;
}
@@ -3525,6 +3559,22 @@ megasas_complete_abort(struct megasas_instance *instance,
}
}
+static void
+megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
+{
+ uint i;
+
+ for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
+ if (instance->ld_ids_prev[i] != 0xff &&
+ instance->ld_ids_from_raidmap[i] == 0xff) {
+ if (megasas_dbg_lvl & LD_PD_DEBUG)
+ dev_info(&instance->pdev->dev,
+ "LD target ID %d removed from RAID map\n", i);
+ instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
+ }
+ }
+}
+
/**
* megasas_complete_cmd - Completes a command
* @instance: Adapter soft state
@@ -3617,8 +3667,6 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
SCSI_SENSE_BUFFERSIZE);
memcpy(cmd->scmd->sense_buffer, cmd->sense,
hdr->sense_len);
-
- cmd->scmd->result |= DRIVER_SENSE << 24;
}
break;
@@ -3687,9 +3735,13 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
fusion->fast_path_io = 0;
}
+ if (instance->adapter_type >= INVADER_SERIES)
+ megasas_set_ld_removed_by_fw(instance);
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
+
break;
}
if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
@@ -7545,11 +7597,16 @@ static int megasas_probe_one(struct pci_dev *pdev,
return 0;
fail_start_aen:
+ instance->unload = 1;
+ scsi_remove_host(instance->host);
fail_io_attach:
megasas_mgmt_info.count--;
megasas_mgmt_info.max_index--;
megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
@@ -7557,8 +7614,16 @@ fail_io_attach:
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
+
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
+ instance->msix_vectors = 0;
+
+ if (instance->fw_crash_state != UNAVAILABLE)
+ megasas_free_host_crash_buffer(instance);
+
+ if (instance->adapter_type != MFI_SERIES)
+ megasas_fusion_stop_watchdog(instance);
fail_init_mfi:
scsi_host_put(host);
fail_alloc_instance:
@@ -8818,8 +8883,10 @@ megasas_aen_polling(struct work_struct *work)
union megasas_evt_class_locale class_locale;
int event_type = 0;
u32 seq_num;
+ u16 ld_target_id;
int error;
u8 dcmd_ret = DCMD_SUCCESS;
+ struct scsi_device *sdev1;
if (!instance) {
printk(KERN_ERR "invalid instance!\n");
@@ -8842,12 +8909,23 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_LD_OFFLINE:
- case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
+ ld_target_id = instance->evt_detail->args.ld.target_id;
+ sdev1 = scsi_device_lookup(instance->host,
+ MEGASAS_MAX_PD_CHANNELS +
+ (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
+ (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL),
+ 0);
+ if (sdev1)
+ megasas_remove_scsi_device(sdev1);
+
+ event_type = SCAN_VD_CHANNEL;
+ break;
case MR_EVT_LD_CREATED:
event_type = SCAN_VD_CHANNEL;
break;
+ case MR_EVT_CFG_CLEARED:
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
@@ -8934,8 +9012,6 @@ static int __init megasas_init(void)
*/
pr_info("megasas: %s\n", MEGASAS_VERSION);
- spin_lock_init(&poll_aen_lock);
-
support_poll_for_event = 2;
support_device_change = 1;
support_nvme_encapsulation = true;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index b6c08d620033..83f69c33b01a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -349,6 +349,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
+ memcpy(instance->ld_ids_prev,
+ instance->ld_ids_from_raidmap,
+ sizeof(instance->ld_ids_from_raidmap));
+ memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS);
/*Convert Raid capability values to CPU arch */
for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
ld = MR_TargetIdToLdGet(i, drv_map);
@@ -359,7 +363,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
raid = MR_LdRaidGet(ld, drv_map);
le32_to_cpus((u32 *)&raid->capability);
-
+ instance->ld_ids_from_raidmap[i] = i;
num_lds--;
}
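The ld_ids_prev / ld_ids_from_raidmap bookkeeping added above lets megasas_set_ld_removed_by_fw() spot targets that vanished between RAID-map refreshes. The comparison reduces to a per-slot snapshot diff, sketched stand-alone below with 0xff meaning "slot unused", as in the patch; the array size is shrunk here purely for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_LD_IDS 8	/* shrunk for the example; the driver uses its own max */

/* A target is "removed by firmware" when it was present in the previous
 * RAID-map snapshot but is absent (0xff) from the current one.
 */
static void find_removed(const uint8_t *prev, const uint8_t *cur, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (prev[i] != 0xff && cur[i] == 0xff)
			printf("LD target ID %d removed from RAID map\n", i);
}

int main(void)
{
	uint8_t prev[MAX_LD_IDS], cur[MAX_LD_IDS];

	memset(prev, 0xff, sizeof(prev));
	memset(cur, 0xff, sizeof(cur));
	prev[2] = 2;		/* present in the previous snapshot */
	prev[5] = 5;
	cur[5] = 5;		/* still present; slot 2 disappeared */

	find_removed(prev, cur, MAX_LD_IDS);
	return 0;
}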
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 2221175ae051..06399c026a8d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -2051,7 +2051,6 @@ map_cmd_status(struct fusion_context *fusion,
SCSI_SENSE_BUFFERSIZE);
memcpy(scmd->sense_buffer, sense,
SCSI_SENSE_BUFFERSIZE);
- scmd->result |= DRIVER_SENSE << 24;
}
/*
@@ -3203,6 +3202,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
{
int sge_count;
u8 cmd_type;
+ u16 pd_index = 0;
+ u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
struct MR_PRIV_DEVICE *mr_device_priv_data;
mr_device_priv_data = scp->device->hostdata;
@@ -3237,8 +3238,12 @@ megasas_build_io_fusion(struct megasas_instance *instance,
megasas_build_syspd_fusion(instance, scp, cmd, true);
break;
case NON_READ_WRITE_SYSPDIO:
- if (instance->secure_jbod_support ||
- mr_device_priv_data->is_tm_capable)
+ pd_index = MEGASAS_PD_INDEX(scp);
+ drive_type = instance->pd_list[pd_index].driveType;
+ if ((instance->secure_jbod_support ||
+ mr_device_priv_data->is_tm_capable) ||
+ (instance->adapter_type >= VENTURA_SERIES &&
+ drive_type == TYPE_ENCLOSURE))
megasas_build_syspd_fusion(instance, scp, cmd, false);
else
megasas_build_syspd_fusion(instance, scp, cmd, true);
@@ -3739,6 +3744,7 @@ static void megasas_sync_irqs(unsigned long instance_addr)
if (irq_ctx->irq_poll_scheduled) {
irq_ctx->irq_poll_scheduled = false;
enable_irq(irq_ctx->os_irq);
+ complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
}
}
}
@@ -3770,6 +3776,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
irq_poll_complete(irqpoll);
irq_ctx->irq_poll_scheduled = false;
enable_irq(irq_ctx->os_irq);
+ complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
}
return num_entries;
@@ -3786,6 +3793,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
struct megasas_instance *instance =
(struct megasas_instance *)instance_addr;
+ struct megasas_irq_context *irq_ctx = NULL;
u32 count, MSIxIndex;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
@@ -3794,8 +3802,10 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
- for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
- complete_cmd_fusion(instance, MSIxIndex, NULL);
+ for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) {
+ irq_ctx = &instance->irq_context[MSIxIndex];
+ complete_cmd_fusion(instance, MSIxIndex, irq_ctx);
+ }
}
/**
@@ -5266,6 +5276,7 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
+ kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 0a9f4e44ab2c..78b72bcf58fe 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -595,9 +595,10 @@ static void mesh_done(struct mesh_state *ms, int start_next)
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
- cmd->result = (ms->stat << 16) | cmd->SCp.Status;
+ set_host_byte(cmd, ms->stat);
+ set_status_byte(cmd, cmd->SCp.Status);
if (ms->stat == DID_OK)
- cmd->result |= cmd->SCp.Message << 8;
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
@@ -993,7 +994,7 @@ static void handle_reset(struct mesh_state *ms)
for (tgt = 0; tgt < 8; ++tgt) {
tp = &ms->tgts[tgt];
if ((cmd = tp->current_req) != NULL) {
- cmd->result = DID_RESET << 16;
+ set_host_byte(cmd, DID_RESET);
tp->current_req = NULL;
mesh_completed(ms, cmd);
}
@@ -1003,7 +1004,7 @@ static void handle_reset(struct mesh_state *ms)
ms->current_req = NULL;
while ((cmd = ms->request_q) != NULL) {
ms->request_q = (struct scsi_cmnd *) cmd->host_scribble;
- cmd->result = DID_RESET << 16;
+ set_host_byte(cmd, DID_RESET);
mesh_completed(ms, cmd);
}
ms->phase = idle;
diff --git a/drivers/scsi/mpi3mr/Kconfig b/drivers/scsi/mpi3mr/Kconfig
new file mode 100644
index 000000000000..f7882375e74f
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config SCSI_MPI3MR
+ tristate "Broadcom MPI3 Storage Controller Device Driver"
+ depends on PCI && SCSI
+ help
+ MPI3 based Storage & RAID Controllers Driver.
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
new file mode 100644
index 000000000000..7c2063e04c81
--- /dev/null
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -0,0 +1,4 @@
+# mpi3mr makefile
+obj-m += mpi3mr.o
+mpi3mr-y += mpi3mr_os.o \
+ mpi3mr_fw.o \
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
new file mode 100644
index 000000000000..d43bbecef651
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -0,0 +1,1880 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2017-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_CNFG_H
+#define MPI30_CNFG_H 1
+#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
+#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
+#define MPI3_CONFIG_PAGETYPE_UEFI_BSD (0x03)
+#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
+#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
+#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
+#define MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT (0x20)
+#define MPI3_CONFIG_PAGETYPE_SAS_EXPANDER (0x21)
+#define MPI3_CONFIG_PAGETYPE_SAS_PHY (0x23)
+#define MPI3_CONFIG_PAGETYPE_SAS_PORT (0x24)
+#define MPI3_CONFIG_PAGETYPE_PCIE_IO_UNIT (0x30)
+#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
+#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
+#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI3_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI3_CONFIG_ACTION_READ_DEFAULT (0x01)
+#define MPI3_CONFIG_ACTION_READ_CURRENT (0x02)
+#define MPI3_CONFIG_ACTION_WRITE_CURRENT (0x03)
+#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
+#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
+#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00ff0000)
+#define MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+#define MPI3_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_SAS_PHY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000ff)
+#define MPI3_SASPORT_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI3_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+#define MPI3_SASPORT_PGAD_PORT_NUMBER_MASK (0x000000ff)
+#define MPI3_ENCLOS_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+#define MPI3_ENCLOS_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE_PORT_NUM (0x10000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE (0x20000000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00ff0000)
+#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
+#define MPI3_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_PCIE_LINK_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
+#define MPI3_PCIE_LINK_PGAD_LINKNUM_MASK (0x000000ff)
+#define MPI3_SECURITY_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SECURITY_PGAD_FORM_GET_NEXT_SLOT (0x00000000)
+#define MPI3_SECURITY_PGAD_FORM_SOT_NUM (0x10000000)
+#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000ff00)
+#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000ff)
+struct mpi3_config_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 page_version;
+ u8 page_number;
+ u8 page_type;
+ u8 action;
+ __le32 page_address;
+ __le16 page_length;
+ __le16 reserved16;
+ __le32 reserved18[2];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_config_page_header {
+ u8 page_version;
+ u8 reserved01;
+ u8 page_number;
+ u8 page_attribute;
+ __le16 page_length;
+ u8 page_type;
+ u8 reserved07;
+};
+
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0)
+#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f)
+#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI3_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI3_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI3_SAS_NEG_LINK_RATE_6_0 (0x0a)
+#define MPI3_SAS_NEG_LINK_RATE_12_0 (0x0b)
+#define MPI3_SAS_NEG_LINK_RATE_22_5 (0x0c)
+#define MPI3_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI3_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI3_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+#define MPI3_SAS_APHYINFO_REASON_MASK (0x0000000f)
+#define MPI3_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI3_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI3_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI3_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI3_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI3_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI3_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI3_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+#define MPI3_SAS_APHYINFO_REASON_EXP_REDUCED_FUNC (0x00000009)
+#define MPI3_SAS_PHYINFO_STATUS_MASK (0xc0000000)
+#define MPI3_SAS_PHYINFO_STATUS_SHIFT (30)
+#define MPI3_SAS_PHYINFO_STATUS_ACCESSIBLE (0x00000000)
+#define MPI3_SAS_PHYINFO_STATUS_NOT_EXIST (0x40000000)
+#define MPI3_SAS_PHYINFO_STATUS_VACANT (0x80000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
+#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000)
+#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI3_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI3_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI3_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI3_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI3_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI3_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI3_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+#define MPI3_SAS_PHYINFO_REASON_EXP_REDUCED_FUNC (0x00090000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI3_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI3_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_MASK (0x00000f00)
+#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_SHIFT (8)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_MASK (0x000000f0)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_DIRECT (0x00000000)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_SUBTRACTIVE (0x00000010)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_TABLE (0x00000020)
+#define MPI3_SAS_PRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_PRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_PRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_PRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_PRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI3_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_PRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_PRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_PRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SAS_HWRATE_MAX_RATE_MASK (0xf0)
+#define MPI3_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI3_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI3_SAS_HWRATE_MAX_RATE_6_0 (0xa0)
+#define MPI3_SAS_HWRATE_MAX_RATE_12_0 (0xb0)
+#define MPI3_SAS_HWRATE_MAX_RATE_22_5 (0xc0)
+#define MPI3_SAS_HWRATE_MIN_RATE_MASK (0x0f)
+#define MPI3_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI3_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
+#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
+#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
+#define MPI3_SLOT_INVALID (0xffff)
+#define MPI3_SLOT_INDEX_INVALID (0xffff)
+struct mpi3_man_page0 {
+ struct mpi3_config_page_header header;
+ u8 chip_revision[8];
+ u8 chip_name[32];
+ u8 board_name[32];
+ u8 board_assembly[32];
+ u8 board_tracer_number[32];
+ __le32 board_power;
+ __le32 reserved94;
+ __le32 reserved98;
+ u8 oem;
+ u8 sub_oem;
+ __le16 reserved9e;
+ u8 board_mfg_day;
+ u8 board_mfg_month;
+ __le16 board_mfg_year;
+ u8 board_rework_day;
+ u8 board_rework_month;
+ __le16 board_rework_year;
+ __le64 board_revision;
+ u8 e_pack_fru[16];
+ u8 product_name[256];
+};
+
+#define MPI3_MAN0_PAGEVERSION (0x00)
+#define MPI3_MAN1_VPD_SIZE (512)
+struct mpi3_man_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ u8 vpd[MPI3_MAN1_VPD_SIZE];
+};
+
+#define MPI3_MAN1_PAGEVERSION (0x00)
+struct mpi3_man5_phy_entry {
+ __le64 ioc_wwid;
+ __le64 device_name;
+ __le64 sata_wwid;
+};
+
+#ifndef MPI3_MAN5_PHY_MAX
+#define MPI3_MAN5_PHY_MAX (1)
+#endif
+struct mpi3_man_page5 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man5_phy_entry phy[MPI3_MAN5_PHY_MAX];
+};
+
+#define MPI3_MAN5_PAGEVERSION (0x00)
+struct mpi3_man6_gpio_entry {
+ u8 function_code;
+ u8 reserved01;
+ __le16 flags;
+ u8 param1;
+ u8 param2;
+ __le16 reserved06;
+ __le32 param3;
+};
+
+#define MPI3_MAN6_GPIO_FUNCTION_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_FUNCTION_ALTERNATE (0x01)
+#define MPI3_MAN6_GPIO_FUNCTION_EXT_INTERRUPT (0x02)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_ACTIVITY (0x03)
+#define MPI3_MAN6_GPIO_FUNCTION_OVER_TEMPERATURE (0x04)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_GREEN (0x05)
+#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
+#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
+#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
+#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_MUX_RESET (0x09)
+#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
+#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ATTN (0x0d)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ONLINE (0x0e)
+#define MPI3_MAN6_GPIO_FUNCTION_EPACK_FAULT (0x0f)
+#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
+#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
+#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
+#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
+#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
+#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_SPEC_MUX (0x00)
+#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_ALL_MUXES (0x01)
+#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_SLOW_EDGE (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_MASK (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_100OHM (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_66OHM (0x0040)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_50OHM (0x0080)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_33OHM (0x00c0)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_MASK (0x0030)
+#define MPI3_MAN6_GPIO_FLAGS_ALT_DATA_SEL_SHIFT (4)
+#define MPI3_MAN6_GPIO_FLAGS_ACTIVE_HIGH (0x0008)
+#define MPI3_MAN6_GPIO_FLAGS_BI_DIR_ENABLED (0x0004)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_MASK (0x0003)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_INPUT (0x0000)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_DRAIN_OUTPUT (0x0001)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_SOURCE_OUTPUT (0x0002)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_PUSH_PULL_OUTPUT (0x0003)
+#ifndef MPI3_MAN6_GPIO_MAX
+#define MPI3_MAN6_GPIO_MAX (1)
+#endif
+struct mpi3_man_page6 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_gpio;
+ u8 reserved0d[3];
+ struct mpi3_man6_gpio_entry gpio[MPI3_MAN6_GPIO_MAX];
+};
+
+#define MPI3_MAN6_PAGEVERSION (0x00)
+#define MPI3_MAN6_FLAGS_HEARTBEAT_LED_DISABLED (0x0001)
+struct mpi3_man7_receptacle_info {
+ __le32 name[4];
+ u8 location;
+ u8 connector_type;
+ u8 ped_clk;
+ u8 connector_id;
+ __le32 reserved14;
+};
+
+#define MPI3_MAN7_LOCATION_UNKNOWN (0x00)
+#define MPI3_MAN7_LOCATION_INTERNAL (0x01)
+#define MPI3_MAN7_LOCATION_EXTERNAL (0x02)
+#define MPI3_MAN7_LOCATION_VIRTUAL (0x03)
+#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
+#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
+#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
+#define MPI3_MAN7_PEDCLK_ID_MASK (0x0f)
+#ifndef MPI3_MAN7_RECEPTACLE_INFO_MAX
+#define MPI3_MAN7_RECEPTACLE_INFO_MAX (1)
+#endif
+struct mpi3_man_page7 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 num_receptacles;
+ u8 reserved0d[3];
+ __le32 enclosure_name[4];
+ struct mpi3_man7_receptacle_info receptacle_info[MPI3_MAN7_RECEPTACLE_INFO_MAX];
+};
+
+#define MPI3_MAN7_PAGEVERSION (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_MASK (0x01)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_0 (0x00)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_1 (0x01)
+struct mpi3_man8_phy_info {
+ u8 receptacle_id;
+ u8 connector_lane;
+ __le16 reserved02;
+ __le16 slotx1;
+ __le16 slotx2;
+ __le16 slotx4;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#ifndef MPI3_MAN8_PHY_INFO_MAX
+#define MPI3_MAN8_PHY_INFO_MAX (1)
+#endif
+struct mpi3_man_page8 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 reserved0d[3];
+ struct mpi3_man8_phy_info phy_info[MPI3_MAN8_PHY_INFO_MAX];
+};
+
+#define MPI3_MAN8_PAGEVERSION (0x00)
+struct mpi3_man9_rsrc_entry {
+ __le32 maximum;
+ __le32 decrement;
+ __le32 minimum;
+ __le32 actual;
+};
+
+enum mpi3_man9_resources {
+ MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+ MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+ MPI3_MAN9_RSRC_SAS_TARGETS = 2,
+ MPI3_MAN9_RSRC_PCIE_TARGETS = 3,
+ MPI3_MAN9_RSRC_INITIATORS = 4,
+ MPI3_MAN9_RSRC_VDS = 5,
+ MPI3_MAN9_RSRC_ENCLOSURES = 6,
+ MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+ MPI3_MAN9_RSRC_EXPANDERS = 8,
+ MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+ MPI3_MAN9_RSRC_PDS = 10,
+ MPI3_MAN9_RSRC_HOST_PDS = 11,
+ MPI3_MAN9_RSRC_ADV_HOST_PDS = 12,
+ MPI3_MAN9_RSRC_RAID_PDS = 13,
+ MPI3_MAN9_RSRC_NUM_RESOURCES
+};
+
+#define MPI3_MAN9_MIN_OUTSTANDING_REQS (1)
+#define MPI3_MAN9_MAX_OUTSTANDING_REQS (65000)
+#define MPI3_MAN9_MIN_TARGET_CMDS (0)
+#define MPI3_MAN9_MAX_TARGET_CMDS (65535)
+#define MPI3_MAN9_MIN_SAS_TARGETS (0)
+#define MPI3_MAN9_MAX_SAS_TARGETS (65535)
+#define MPI3_MAN9_MIN_PCIE_TARGETS (0)
+#define MPI3_MAN9_MIN_INITIATORS (0)
+#define MPI3_MAN9_MAX_INITIATORS (65535)
+#define MPI3_MAN9_MIN_ENCLOSURES (0)
+#define MPI3_MAN9_MAX_ENCLOSURES (65535)
+#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
+#define MPI3_MAN9_MIN_EXPANDERS (0)
+#define MPI3_MAN9_MAX_EXPANDERS (65535)
+#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
+struct mpi3_man_page9 {
+ struct mpi3_config_page_header header;
+ u8 num_resources;
+ u8 reserved09;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+ struct mpi3_man9_rsrc_entry resource[MPI3_MAN9_RSRC_NUM_RESOURCES];
+};
+
+#define MPI3_MAN9_PAGEVERSION (0x00)
+struct mpi3_man10_istwi_ctrlr_entry {
+ __le16 slave_address;
+ __le16 flags;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
+#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
+#define MPI3_MAN10_ISTWI_CTRLR_MAX (1)
+#endif
+struct mpi3_man_page10 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_ctrl;
+ u8 reserved0d[3];
+ struct mpi3_man10_istwi_ctrlr_entry istwi_controller[MPI3_MAN10_ISTWI_CTRLR_MAX];
+};
+
+#define MPI3_MAN10_PAGEVERSION (0x00)
+struct mpi3_man11_mux_device_format {
+ u8 max_channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_temp_sensor_device_format {
+ u8 type;
+ u8 reserved01[3];
+ u8 temp_channel[4];
+};
+
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
+struct mpi3_man11_seeprom_device_format {
+ u8 size;
+ u8 page_write_size;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_SEEPROM_SIZE_1KBITS (0x01)
+#define MPI3_MAN11_SEEPROM_SIZE_2KBITS (0x02)
+#define MPI3_MAN11_SEEPROM_SIZE_4KBITS (0x03)
+#define MPI3_MAN11_SEEPROM_SIZE_8KBITS (0x04)
+#define MPI3_MAN11_SEEPROM_SIZE_16KBITS (0x05)
+#define MPI3_MAN11_SEEPROM_SIZE_32KBITS (0x06)
+#define MPI3_MAN11_SEEPROM_SIZE_64KBITS (0x07)
+#define MPI3_MAN11_SEEPROM_SIZE_128KBITS (0x08)
+struct mpi3_man11_ddr_spd_device_format {
+ u8 channel;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+struct mpi3_man11_cable_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_CABLE_MGMT_TYPE_SFF_8636 (0x00)
+struct mpi3_man11_bkplane_spec_ubm_format {
+ __le16 flags;
+ __le16 reserved02;
+};
+
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_FORCE_POLLING (0x0100)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_MASK (0x00f0)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+struct mpi3_man11_bkplane_spec_vpp_format {
+ __le16 flags;
+ __le16 reserved02;
+};
+
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0040)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_SHIFT (0)
+union mpi3_man11_bkplane_spec_format {
+ struct mpi3_man11_bkplane_spec_ubm_format ubm;
+ struct mpi3_man11_bkplane_spec_vpp_format vpp;
+};
+
+struct mpi3_man11_bkplane_mgmt_device_format {
+ u8 type;
+ u8 receptacle_id;
+ __le16 reserved02;
+ union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
+};
+
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_VPP (0x01)
+struct mpi3_man11_gas_gauge_device_format {
+ u8 type;
+ u8 reserved01[3];
+ __le32 reserved04;
+};
+
+#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
+union mpi3_man11_device_specific_format {
+ struct mpi3_man11_mux_device_format mux;
+ struct mpi3_man11_temp_sensor_device_format temp_sensor;
+ struct mpi3_man11_seeprom_device_format seeprom;
+ struct mpi3_man11_ddr_spd_device_format ddr_spd;
+ struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
+ struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
+ struct mpi3_man11_gas_gauge_device_format gas_gauge;
+ __le32 words[2];
+};
+
+struct mpi3_man11_istwi_device_format {
+ u8 device_type;
+ u8 controller;
+ u8 reserved02;
+ u8 flags;
+ __le16 device_address;
+ u8 mux_channel;
+ u8 mux_index;
+ union mpi3_man11_device_specific_format device_specific;
+};
+
+#define MPI3_MAN11_ISTWI_DEVTYPE_MUX (0x00)
+#define MPI3_MAN11_ISTWI_DEVTYPE_TEMP_SENSOR (0x01)
+#define MPI3_MAN11_ISTWI_DEVTYPE_SEEPROM (0x02)
+#define MPI3_MAN11_ISTWI_DEVTYPE_DDR_SPD (0x03)
+#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
+#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
+#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
+#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_MASK (0x06)
+#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_100KHZ (0x00)
+#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_400KHZ (0x02)
+#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
+#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
+#endif
+struct mpi3_man_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_istwi_dev;
+ u8 reserved0d[3];
+ struct mpi3_man11_istwi_device_format istwi_device[MPI3_MAN11_ISTWI_DEVICE_MAX];
+};
+
+#define MPI3_MAN11_PAGEVERSION (0x00)
+#ifndef MPI3_MAN12_NUM_SGPIO_MAX
+#define MPI3_MAN12_NUM_SGPIO_MAX (1)
+#endif
+struct mpi3_man12_sgpio_info {
+ u8 slot_count;
+ u8 reserved01[3];
+ __le32 reserved04;
+ u8 phy_order[32];
+};
+
+struct mpi3_man_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 s_clock_freq;
+ __le32 activity_modulation;
+ u8 num_sgpio;
+ u8 reserved15[3];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le32 pattern[8];
+ struct mpi3_man12_sgpio_info sgpio_info[MPI3_MAN12_NUM_SGPIO_MAX];
+};
+
+#define MPI3_MAN12_PAGEVERSION (0x00)
+#define MPI3_MAN12_FLAGS_ERROR_PRESENCE_ENABLED (0x0400)
+#define MPI3_MAN12_FLAGS_ACTIVITY_INVERT_ENABLED (0x0200)
+#define MPI3_MAN12_FLAGS_GROUP_ID_DISABLED (0x0100)
+#define MPI3_MAN12_FLAGS_SIO_CLK_FILTER_ENABLED (0x0004)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_MASK (0x0002)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_OPEN_DRAIN (0x0002)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_MASK (0x0001)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_PUSH_PULL (0x0000)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_OPEN_DRAIN (0x0001)
+#define MPI3_MAN12_SIO_CLK_FREQ_MIN (32)
+#define MPI3_MAN12_SIO_CLK_FREQ_MAX (100000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_MASK (0x0000f000)
+#define MPI3_MAN12_ACTIVITY_MODULATION_FORCE_OFF_SHIFT (12)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_MASK (0x00000f00)
+#define MPI3_MAN12_ACTIVITY_MODULATION_MAX_ON_SHIFT (8)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_MASK (0x000000f0)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_OFF_SHIFT (4)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_MASK (0x0000000f)
+#define MPI3_MAN12_ACTIVITY_MODULATION_STRETCH_ON_SHIFT (0)
+#define MPI3_MAN12_PATTERN_RATE_MASK (0xe0000000)
+#define MPI3_MAN12_PATTERN_RATE_2_HZ (0x00000000)
+#define MPI3_MAN12_PATTERN_RATE_4_HZ (0x20000000)
+#define MPI3_MAN12_PATTERN_RATE_8_HZ (0x40000000)
+#define MPI3_MAN12_PATTERN_RATE_16_HZ (0x60000000)
+#define MPI3_MAN12_PATTERN_RATE_10_HZ (0x80000000)
+#define MPI3_MAN12_PATTERN_RATE_20_HZ (0xa0000000)
+#define MPI3_MAN12_PATTERN_RATE_40_HZ (0xc0000000)
+#define MPI3_MAN12_PATTERN_LENGTH_MASK (0x1f000000)
+#define MPI3_MAN12_PATTERN_LENGTH_SHIFT (24)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_MASK (0x00ffffff)
+#define MPI3_MAN12_PATTERN_BIT_PATTERN_SHIFT (0)
+#ifndef MPI3_MAN13_NUM_TRANSLATION_MAX
+#define MPI3_MAN13_NUM_TRANSLATION_MAX (1)
+#endif
+struct mpi3_man13_translation_info {
+ __le32 slot_status;
+ __le32 mask;
+ u8 activity;
+ u8 locate;
+ u8 error;
+ u8 reserved0b;
+};
+
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_FAULT (0x20000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_OFF (0x10000000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_ACTIVITY (0x00800000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_DEVICE_MISSING (0x00100000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_INSERT (0x00080000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REMOVAL (0x00040000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IDENTIFY (0x00020000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_OK (0x00008000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_HOT_SPARE (0x00002000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000800)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_IN_FAILED_ARRAY (0x00000400)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP (0x00000200)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_REBUILD_REMAP_ABORT (0x00000100)
+#define MPI3_MAN13_TRANSLATION_SLOTSTATUS_PREDICTED_FAILURE (0x00000040)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_OFF (0x00)
+#define MPI3_MAN13_BLINK_PATTERN_FORCE_ON (0x01)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_0 (0x02)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_1 (0x03)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_2 (0x04)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_3 (0x05)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_4 (0x06)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_5 (0x07)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_6 (0x08)
+#define MPI3_MAN13_BLINK_PATTERN_PATTERN_7 (0x09)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY (0x0a)
+#define MPI3_MAN13_BLINK_PATTERN_ACTIVITY_TRAIL (0x0b)
+struct mpi3_man_page13 {
+ struct mpi3_config_page_header header;
+ u8 num_trans;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man13_translation_info translation[MPI3_MAN13_NUM_TRANSLATION_MAX];
+};
+
+#define MPI3_MAN13_PAGEVERSION (0x00)
+struct mpi3_man_page14 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_slot_groups;
+ u8 num_slots;
+ __le16 max_cert_chain_length;
+ __le32 sealed_slots;
+};
+
+#define MPI3_MAN14_PAGEVERSION (0x00)
+#define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01)
+#define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e)
+#define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00)
+#define MPI3_MAN14_FLAGS_AUTH_API_CEREBUS (0x02)
+#define MPI3_MAN14_FLAGS_AUTH_API_DMTF_PMCI (0x04)
+#ifndef MPI3_MAN15_VERSION_RECORD_MAX
+#define MPI3_MAN15_VERSION_RECORD_MAX 1
+#endif
+struct mpi3_man15_version_record {
+ __le16 spdm_version;
+ __le16 reserved02;
+};
+
+struct mpi3_man_page15 {
+ struct mpi3_config_page_header header;
+ u8 num_version_records;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_man15_version_record version_record[MPI3_MAN15_VERSION_RECORD_MAX];
+};
+
+#define MPI3_MAN15_PAGEVERSION (0x00)
+#ifndef MPI3_MAN16_CERT_ALGO_MAX
+#define MPI3_MAN16_CERT_ALGO_MAX 1
+#endif
+struct mpi3_man16_certificate_algorithm {
+ u8 slot_group;
+ u8 reserved01[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved0c[3];
+};
+
+struct mpi3_man_page16 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_cert_algos;
+ u8 reserved0d[3];
+ struct mpi3_man16_certificate_algorithm certificate_algorithm[MPI3_MAN16_CERT_ALGO_MAX];
+};
+
+#define MPI3_MAN16_PAGEVERSION (0x00)
+#ifndef MPI3_MAN17_HASH_ALGORITHM_MAX
+#define MPI3_MAN17_HASH_ALGORITHM_MAX 1
+#endif
+struct mpi3_man17_hash_algorithm {
+ u8 meas_specification;
+ u8 reserved01[3];
+ __le32 measurement_hash_algo;
+ __le32 reserved08[2];
+};
+
+struct mpi3_man_page17 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_hash_algos;
+ u8 reserved0d[3];
+ struct mpi3_man17_hash_algorithm hash_algorithm[MPI3_MAN17_HASH_ALGORITHM_MAX];
+};
+
+#define MPI3_MAN17_PAGEVERSION (0x00)
+struct mpi3_man_page20 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 nonpremium_features;
+ u8 allowed_personalities;
+ u8 reserved11[3];
+};
+
+#define MPI3_MAN20_PAGEVERSION (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_MASK (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_ALLOWED (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_MASK (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_ALLOWED (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_NOT_ALLOWED (0x00)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_MASK (0x01)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_ENABLED (0x00)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_DISABLED (0x01)
+struct mpi3_man_page21 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 flags;
+};
+
+#define MPI3_MAN21_PAGEVERSION (0x00)
+#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_MASK (0x80)
+#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_ENABLED (0x80)
+#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_DISABLED (0x00)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x60)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x20)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x40)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x08)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x08)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x01)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x01)
+#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
+#define MPI3_MAN_PROD_SPECIFIC_MAX (1)
+#endif
+struct mpi3_man_page_product_specific {
+ struct mpi3_config_page_header header;
+ __le32 product_specific_info[MPI3_MAN_PROD_SPECIFIC_MAX];
+};
+
+struct mpi3_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le64 unique_value;
+ __le32 nvdata_version_default;
+ __le32 nvdata_version_persistent;
+};
+
+#define MPI3_IOUNIT0_PAGEVERSION (0x00)
+struct mpi3_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ u8 dmd_io_delay;
+ u8 dmd_report_pc_ie;
+ u8 dmd_report_sata;
+ u8 dmd_report_sas;
+};
+
+#define MPI3_IOUNIT1_PAGEVERSION (0x00)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_MASK (0x00000030)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_DISABLE (0x00000010)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_NO_MODIFY (0x00000020)
+#define MPI3_IOUNIT1_FLAGS_ATA_SECURITY_FREEZE_LOCK (0x00000008)
+#define MPI3_IOUNIT1_FLAGS_WRITE_SAME_BUFFER (0x00000004)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_MASK (0x00000003)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_ENABLE (0x00000000)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_DISABLE (0x00000001)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_UNCHANGED (0x00000002)
+#define MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_MASK (0x7f)
+#define MPI3_IOUNIT1_DMD_REPORT_UNIT_16_SEC (0x80)
+#ifndef MPI3_IO_UNIT2_GPIO_VAL_MAX
+#define MPI3_IO_UNIT2_GPIO_VAL_MAX (1)
+#endif
+struct mpi3_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 gpio_count;
+ u8 reserved09[3];
+ __le16 gpio_val[MPI3_IO_UNIT2_GPIO_VAL_MAX];
+};
+
+#define MPI3_IOUNIT2_PAGEVERSION (0x00)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_MASK (0xfffc)
+#define MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT (2)
+#define MPI3_IOUNIT2_GPIO_SETTING_MASK (0x0001)
+#define MPI3_IOUNIT2_GPIO_SETTING_OFF (0x0000)
+#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
+struct mpi3_io_unit3_sensor {
+ __le16 flags;
+ __le16 reserved02;
+ __le16 threshold[4];
+ __le32 reserved0c;
+ __le32 reserved10;
+ __le32 reserved14;
+};
+
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_T0_ENABLE (0x0001)
+#ifndef MPI3_IO_UNIT3_SENSOR_MAX
+#define MPI3_IO_UNIT3_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 polling_interval;
+ __le16 reserved0e;
+ struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT3_PAGEVERSION (0x00)
+struct mpi3_io_unit4_sensor {
+ __le16 current_temperature;
+ __le16 reserved02;
+ u8 flags;
+ u8 reserved05[3];
+ __le32 reserved08;
+ __le32 reserved0c;
+};
+
+#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
+#ifndef MPI3_IO_UNIT4_SENSOR_MAX
+#define MPI3_IO_UNIT4_SENSOR_MAX (1)
+#endif
+struct mpi3_io_unit_page4 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_sensors;
+ u8 reserved0d[3];
+ struct mpi3_io_unit4_sensor sensor[MPI3_IO_UNIT4_SENSOR_MAX];
+};
+
+#define MPI3_IOUNIT4_PAGEVERSION (0x00)
+struct mpi3_io_unit5_spinup_group {
+ u8 max_target_spinup;
+ u8 spinup_delay;
+ u8 spinup_flags;
+ u8 reserved03;
+};
+
+#define MPI3_IOUNIT5_SPINUP_FLAGS_DISABLE (0x01)
+#ifndef MPI3_IO_UNIT5_PHY_MAX
+#define MPI3_IO_UNIT5_PHY_MAX (4)
+#endif
+struct mpi3_io_unit_page5 {
+ struct mpi3_config_page_header header;
+ struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
+ __le32 reserved18;
+ __le32 reserved1c;
+ __le32 reserved20;
+ u8 reserved24;
+ u8 sata_device_wait_time;
+ u8 spinup_encl_drive_count;
+ u8 spinup_encl_delay;
+ u8 num_phys;
+ u8 pe_initial_spinup_delay;
+ u8 topology_stable_time;
+ u8 flags;
+ u8 phy[MPI3_IO_UNIT5_PHY_MAX];
+};
+
+#define MPI3_IOUNIT5_PAGEVERSION (0x00)
+#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
+#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
+#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
+struct mpi3_io_unit_page6 {
+ struct mpi3_config_page_header header;
+ __le32 board_power_requirement;
+ __le32 pci_slot_power_allocation;
+ u8 flags;
+ u8 reserved11[3];
+};
+
+#define MPI3_IOUNIT6_PAGEVERSION (0x00)
+#define MPI3_IOUNIT6_FLAGS_ACT_CABLE_PWR_EXC (0x01)
+struct mpi3_io_unit_page7 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+};
+
+#define MPI3_IOUNIT7_PAGEVERSION (0x00)
+#ifndef MPI3_IOUNIT8_DIGEST_MAX
+#define MPI3_IOUNIT8_DIGEST_MAX (1)
+#endif
+union mpi3_iounit8_digest {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+struct mpi3_io_unit_page8 {
+ struct mpi3_config_page_header header;
+ u8 sb_mode;
+ u8 sb_state;
+ __le16 reserved0a;
+ u8 num_slots;
+ u8 slots_available;
+ u8 current_key_encryption_algo;
+ u8 key_digest_hash_algo;
+ __le32 reserved10[2];
+ __le32 current_key[128];
+ union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX];
+};
+
+#define MPI3_IOUNIT8_PAGEVERSION (0x00)
+#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
+#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
+#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+struct mpi3_io_unit_page9 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le16 first_device;
+ __le16 reserved0e;
+};
+
+#define MPI3_IOUNIT9_PAGEVERSION (0x00)
+#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x01)
+#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+struct mpi3_ioc_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 vendor_id;
+ __le16 device_id;
+ u8 revision_id;
+ u8 reserved11[3];
+ __le32 class_code;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+};
+
+#define MPI3_IOC0_PAGEVERSION (0x00)
+struct mpi3_ioc_page1 {
+ struct mpi3_config_page_header header;
+ __le32 coalescing_timeout;
+ u8 coalescing_depth;
+ u8 pci_slot_num;
+ __le16 reserved0e;
+};
+
+#define MPI3_IOC1_PAGEVERSION (0x00)
+#define MPI3_IOC1_PCISLOTNUM_UNKNOWN (0xff)
+#ifndef MPI3_IOC2_EVENTMASK_WORDS
+#define MPI3_IOC2_EVENTMASK_WORDS (4)
+#endif
+struct mpi3_ioc_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_IOC2_EVENTMASK_WORDS];
+};
+
+#define MPI3_IOC2_PAGEVERSION (0x00)
+struct mpi3_uefibsd_page0 {
+ struct mpi3_config_page_header header;
+ __le32 bsd_options;
+ u8 ssu_timeout;
+ u8 io_timeout;
+ u8 tur_retries;
+ u8 tur_interval;
+ u8 reserved10;
+ u8 security_key_timeout;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+};
+
+#define MPI3_UEFIBSD_PAGEVERSION (0x00)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_NONE (0x00000002)
+#define MPI3_UEFIBSD_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_UEFIBSD_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+union mpi3_security_mac {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security_nonce {
+ __le32 dword[16];
+ __le16 word[32];
+ u8 byte[64];
+};
+
+union mpi3_security0_cert_chain {
+ __le32 dword[1024];
+ __le16 word[2048];
+ u8 byte[4096];
+};
+
+struct mpi3_security_page0 {
+ struct mpi3_config_page_header header;
+ u8 slot_num_group;
+ u8 slot_num;
+ __le16 cert_chain_length;
+ u8 cert_chain_flags;
+ u8 reserved0d[3];
+ __le32 base_asym_algo;
+ __le32 base_hash_algo;
+ __le32 reserved18[4];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ union mpi3_security0_cert_chain certificate_chain;
+};
+
+#define MPI3_SECURITY0_PAGEVERSION (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_MASK (0x0e)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_UNUSED (0x00)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_CERBERUS (0x02)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_SPDM (0x04)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_SEALED (0x01)
+#ifndef MPI3_SECURITY1_KEY_RECORD_MAX
+#define MPI3_SECURITY1_KEY_RECORD_MAX 1
+#endif
+#ifndef MPI3_SECURITY1_PAD_MAX
+#define MPI3_SECURITY1_PAD_MAX 1
+#endif
+union mpi3_security1_key_data {
+ __le32 dword[128];
+ __le16 word[256];
+ u8 byte[512];
+};
+
+struct mpi3_security1_key_record {
+ u8 flags;
+ u8 consumer;
+ __le16 key_data_size;
+ __le32 additional_key_data;
+ __le32 reserved08[2];
+ union mpi3_security1_key_data key_data;
+};
+
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_MASK (0x1f)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_HMAC (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_AES (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PRIVATE (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_ECDSA_PUBLIC (0x04)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_AUTH_DEV_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
+struct mpi3_security_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08[2];
+ union mpi3_security_mac mac;
+ union mpi3_security_nonce nonce;
+ u8 num_keys;
+ u8 reserved91[3];
+ __le32 reserved94[3];
+ struct mpi3_security1_key_record key_record[MPI3_SECURITY1_KEY_RECORD_MAX];
+ u8 pad[MPI3_SECURITY1_PAD_MAX];
+};
+
+#define MPI3_SECURITY1_PAGEVERSION (0x00)
+struct mpi3_sas_io_unit0_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 reserved06;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 discovery_status;
+ __le32 reserved10;
+};
+
+#ifndef MPI3_SAS_IO_UNIT0_PHY_MAX
+#define MPI3_SAS_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 reserved0d[3];
+ struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+struct mpi3_sas_io_unit1_phy_data {
+ u8 io_unit_port;
+ u8 port_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le16 controller_phy_device_info;
+ __le16 max_target_port_connect_time;
+ __le32 reserved08;
+};
+
+#ifndef MPI3_SAS_IO_UNIT1_PHY_MAX
+#define MPI3_SAS_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le16 control_flags;
+ __le16 sas_narrow_max_queue_depth;
+ __le16 additional_control_flags;
+ __le16 sas_wide_max_queue_depth;
+ u8 num_phys;
+ u8 sata_max_q_depth;
+ __le16 reserved12;
+ struct mpi3_sas_io_unit1_phy_data phy_data[MPI3_SAS_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT1_CONTROL_CONTROLLER_DEVICE_SELF_TEST (0x8000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI3_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI3_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI3_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI3_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_MASK (0x0001)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_DEVICE_NAME (0x0000)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_SAS_ADDRESS (0x0001)
+#define MPI3_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
+#define MPI3_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI3_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI3_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI3_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI3_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI3_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI3_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI3_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+#define MPI3_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+#define MPI3_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_6_0 (0xa0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_12_0 (0xb0)
+#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_22_5 (0xc0)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_MASK (0x0f)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_6_0 (0x0a)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_12_0 (0x0b)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_22_5 (0x0c)
+struct mpi3_sas_io_unit2_phy_pm_settings {
+ u8 control_flags;
+ u8 reserved01;
+ __le16 inactivity_timer_exponent;
+ u8 sata_partial_timeout;
+ u8 reserved05;
+ u8 sata_slumber_timeout;
+ u8 reserved07;
+ u8 sas_partial_timeout;
+ u8 reserved09;
+ u8 sas_slumber_timeout;
+ u8 reserved0b;
+};
+
+#ifndef MPI3_SAS_IO_UNIT2_PHY_MAX
+#define MPI3_SAS_IO_UNIT2_PHY_MAX (1)
+#endif
+struct mpi3_sas_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09[3];
+ __le32 reserved0c;
+ struct mpi3_sas_io_unit2_phy_pm_settings sas_phy_power_management_settings[MPI3_SAS_IO_UNIT2_PHY_MAX];
+};
+
+#define MPI3_SASIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI3_SASIOUNIT2_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI3_SASIOUNIT2_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_MASK (0x7000)
+#define MPI3_SASIOUNIT2_ITE_SAS_SLUMBER_SHIFT (12)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_MASK (0x0700)
+#define MPI3_SASIOUNIT2_ITE_SAS_PARTIAL_SHIFT (8)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_MASK (0x0070)
+#define MPI3_SASIOUNIT2_ITE_SATA_SLUMBER_SHIFT (4)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_MASK (0x0007)
+#define MPI3_SASIOUNIT2_ITE_SATA_PARTIAL_SHIFT (0)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_SECONDS (7)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_SECOND (6)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MILLISECONDS (5)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MILLISECONDS (4)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MILLISECOND (3)
+#define MPI3_SASIOUNIT2_ITE_EXP_HUNDRED_MICROSECONDS (2)
+#define MPI3_SASIOUNIT2_ITE_EXP_TEN_MICROSECONDS (1)
+#define MPI3_SASIOUNIT2_ITE_EXP_ONE_MICROSECOND (0)
+struct mpi3_sas_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 power_management_capabilities;
+};
+
+#define MPI3_SASIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI3_SASIOUNIT3_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI3_SASIOUNIT3_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI3_SASIOUNIT3_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+struct mpi3_sas_expander_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 report_gen_length;
+ __le16 enclosure_handle;
+ __le32 reserved0c;
+ __le64 sas_address;
+ __le32 discovery_status;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 expander_change_count;
+ __le16 expander_route_indexes;
+ u8 num_phys;
+ u8 sas_level;
+ __le16 flags;
+ __le16 stp_bus_inactivity_time_limit;
+ __le16 stp_max_connect_time_limit;
+ __le16 stp_smp_nexus_loss_time;
+ __le16 max_num_routed_sas_addresses;
+ __le64 active_zone_manager_sas_address;
+ __le16 zone_lock_inactivity_limit;
+ __le16 reserved3a;
+ u8 time_to_reduced_func;
+ u8 initial_time_to_reduced_func;
+ u8 max_reduced_func_time;
+ u8 exp_status;
+};
+
+#define MPI3_SASEXPANDER0_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI3_SASEXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI3_SASEXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI3_SASEXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI3_SASEXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI3_SASEXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI3_SASEXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI3_SASEXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI3_SASEXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI3_SASEXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI3_SASEXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+#define MPI3_SASEXPANDER0_ES_NOT_RESPONDING (0x02)
+#define MPI3_SASEXPANDER0_ES_RESPONDING (0x03)
+#define MPI3_SASEXPANDER0_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_sas_expander_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 reserved09[3];
+ u8 num_phys;
+ u8 phy;
+ __le16 num_table_entries_programmed;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ __le16 attached_dev_handle;
+ __le32 phy_info;
+ __le16 attached_device_info;
+ __le16 reserved1a;
+ __le16 expander_dev_handle;
+ u8 change_count;
+ u8 negotiated_link_rate;
+ u8 phy_identifier;
+ u8 attached_phy_identifier;
+ u8 reserved22;
+ u8 discovery_info;
+ __le32 attached_phy_info;
+ u8 zone_group;
+ u8 self_config_status;
+ __le16 reserved2a;
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASEXPANDER1_PAGEVERSION (0x00)
+#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+struct mpi3_sas_port_page0 {
+ struct mpi3_config_page_header header;
+ u8 port_number;
+ u8 reserved09;
+ u8 port_width;
+ u8 reserved0b;
+ u8 zone_group;
+ u8 reserved0d[3];
+ __le64 sas_address;
+ __le16 device_info;
+ __le16 reserved1a;
+ __le32 reserved1c;
+};
+
+#define MPI3_SASPORT0_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page0 {
+ struct mpi3_config_page_header header;
+ __le16 owner_dev_handle;
+ __le16 reserved0a;
+ __le16 attached_dev_handle;
+ u8 attached_phy_identifier;
+ u8 reserved0f;
+ __le32 attached_phy_info;
+ u8 programmed_link_rate;
+ u8 hw_link_rate;
+ u8 change_count;
+ u8 flags;
+ __le32 phy_info;
+ u8 negotiated_link_rate;
+ u8 reserved1d[3];
+ __le16 slot;
+ __le16 slot_index;
+};
+
+#define MPI3_SASPHY0_PAGEVERSION (0x00)
+#define MPI3_SASPHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+struct mpi3_sas_phy_page1 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ __le32 invalid_dword_count;
+ __le32 running_disparity_error_count;
+ __le32 loss_dword_synch_count;
+ __le32 phy_reset_problem_count;
+};
+
+#define MPI3_SASPHY1_PAGEVERSION (0x00)
+struct mpi3_sas_phy2_phy_event {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ __le32 phy_event_info;
+};
+
+#ifndef MPI3_SAS_PHY2_PHY_EVENT_MAX
+#define MPI3_SAS_PHY2_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page2 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy2_phy_event phy_event[MPI3_SAS_PHY2_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY2_PAGEVERSION (0x00)
+struct mpi3_sas_phy3_phy_event_config {
+ u8 phy_event_code;
+ u8 reserved01[3];
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved07;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved0e;
+};
+
+#define MPI3_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI3_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI3_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI3_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI3_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI3_SASPHY3_EVENT_CODE_INV_SPL_PACKETS (0x07)
+#define MPI3_SASPHY3_EVENT_CODE_LOSS_SPL_PACKET_SYNC (0x08)
+#define MPI3_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI3_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI3_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI3_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI3_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI3_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI3_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI3_SASPHY3_EVENT_CODE_CONNECTION (0x2a)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2b)
+#define MPI3_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2c)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2d)
+#define MPI3_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2e)
+#define MPI3_SASPHY3_EVENT_CODE_PERSIST_CONN (0x2f)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI3_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI3_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI3_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI3_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xd0)
+#define MPI3_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xd1)
+#define MPI3_SASPHY3_EVENT_CODE_RX_AIP (0xd2)
+#define MPI3_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xd3)
+#define MPI3_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xd4)
+#define MPI3_SASPHY3_EVENT_CODE_LCCONN_TIME (0xd5)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xd6)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_TX_START (0xd7)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xd8)
+#define MPI3_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xd9)
+#define MPI3_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xda)
+#define MPI3_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xdb)
+#define MPI3_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xdc)
+#define MPI3_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI3_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI3_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI3_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI3_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI3_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+#define MPI3_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI3_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+#ifndef MPI3_SAS_PHY3_PHY_EVENT_MAX
+#define MPI3_SAS_PHY3_PHY_EVENT_MAX (1)
+#endif
+struct mpi3_sas_phy_page3 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phy_events;
+ u8 reserved0d[3];
+ struct mpi3_sas_phy3_phy_event_config phy_event_config[MPI3_SAS_PHY3_PHY_EVENT_MAX];
+};
+
+#define MPI3_SASPHY3_PAGEVERSION (0x00)
+struct mpi3_sas_phy_page4 {
+ struct mpi3_config_page_header header;
+ u8 reserved08[3];
+ u8 flags;
+ u8 initial_frame[28];
+};
+
+#define MPI3_SASPHY4_PAGEVERSION (0x00)
+#define MPI3_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI3_SASPHY4_FLAGS_SATA_FRAME (0x01)
+#define MPI3_PCIE_LINK_RETIMERS_MASK (0x30)
+#define MPI3_PCIE_LINK_RETIMERS_SHIFT (4)
+#define MPI3_PCIE_NEG_LINK_RATE_MASK (0x0f)
+#define MPI3_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
+#define MPI3_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI3_PCIE_NEG_LINK_RATE_2_5 (0x02)
+#define MPI3_PCIE_NEG_LINK_RATE_5_0 (0x03)
+#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
+#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
+struct mpi3_pcie_io_unit0_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 negotiated_link_rate;
+ __le16 attached_dev_handle;
+ __le16 controller_dev_handle;
+ __le32 enumeration_status;
+ u8 io_unit_port;
+ u8 reserved0d[3];
+};
+
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_MASK (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_IOUNIT1 (0x00)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_BKPLANE (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_ENUM_IN_PROGRESS (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_PCIEIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCH_DEPTH_EXCEEDED (0x80000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
+#define MPI3_PCIEIOUNIT0_ES_MAX_ENDPOINTS_EXCEEDED (0x20000000)
+#define MPI3_PCIEIOUNIT0_ES_INSUFFICIENT_RESOURCES (0x10000000)
+#ifndef MPI3_PCIE_IO_UNIT0_PHY_MAX
+#define MPI3_PCIE_IO_UNIT0_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page0 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_phys;
+ u8 init_status;
+ __le16 reserved0e;
+ struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_RESOURCE_ALLOC_FAILED (0x03)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_HOST_PORT_MISMATCH (0x06)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PHYS_NOT_CONSECUTIVE (0x07)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
+#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
+struct mpi3_pcie_io_unit1_phy_data {
+ u8 link;
+ u8 link_flags;
+ u8 phy_flags;
+ u8 max_min_link_rate;
+ __le32 reserved04;
+ __le32 reserved08;
+};
+
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
+#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
+#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
+#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
+#endif
+struct mpi3_pcie_io_unit_page1 {
+ struct mpi3_config_page_header header;
+ __le32 control_flags;
+ __le32 reserved0c;
+ u8 num_phys;
+ u8 reserved11;
+ __le16 reserved12;
+ struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
+};
+
+#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+struct mpi3_pcie_io_unit_page2 {
+ struct mpi3_config_page_header header;
+ __le16 nv_me_max_queue_depth;
+ __le16 reserved0a;
+ u8 nv_me_abort_to;
+ u8 reserved0d;
+ __le16 reserved0e;
+};
+
+#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
+struct mpi3_pcie_switch_page0 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 switch_status;
+ u8 reserved0a[2];
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ u8 num_ports;
+ u8 pc_ie_level;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le32 reserved18;
+ __le32 reserved1c;
+};
+
+#define MPI3_PCIESWITCH0_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH0_SS_NOT_RESPONDING (0x02)
+#define MPI3_PCIESWITCH0_SS_RESPONDING (0x03)
+#define MPI3_PCIESWITCH0_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_pcie_switch_page1 {
+ struct mpi3_config_page_header header;
+ u8 io_unit_port;
+ u8 reserved09[3];
+ u8 num_ports;
+ u8 port_num;
+ __le16 attached_dev_handle;
+ __le16 switch_dev_handle;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ __le16 slot;
+ __le16 slot_index;
+ __le32 reserved18;
+};
+
+#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
+struct mpi3_pcie_link_page0 {
+ struct mpi3_config_page_header header;
+ u8 link;
+ u8 reserved09[3];
+ __le32 correctable_error_count;
+ __le16 n_fatal_error_count;
+ __le16 reserved12;
+ __le16 fatal_error_count;
+ __le16 reserved16;
+};
+
+#define MPI3_PCIELINK0_PAGEVERSION (0x00)
+struct mpi3_enclosure_page0 {
+ struct mpi3_config_page_header header;
+ __le64 enclosure_logical_id;
+ __le16 flags;
+ __le16 enclosure_handle;
+ __le16 num_slots;
+ __le16 start_slot;
+ u8 io_unit_port;
+ u8 enclosure_level;
+ __le16 sep_dev_handle;
+ __le32 reserved1c;
+};
+
+#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_MASK (0xc000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
+#define MPI3_ENCLS0_FLAGS_MNG_MASK (0x000f)
+#define MPI3_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI3_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI3_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0002)
+#define MPI3_DEVICE_DEVFORM_SAS_SATA (0x00)
+#define MPI3_DEVICE_DEVFORM_PCIE (0x01)
+#define MPI3_DEVICE_DEVFORM_VD (0x02)
+struct mpi3_device0_sas_sata_format {
+ __le64 sas_address;
+ __le16 flags;
+ __le16 device_info;
+ u8 phy_num;
+ u8 attached_phy_identifier;
+ u8 max_port_connections;
+ u8 zone_group;
+};
+
+#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
+#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SW_PRESERVE (0x0040)
+#define MPI3_DEVICE0_SASSATA_FLAGS_UNSUPP_DEV (0x0020)
+#define MPI3_DEVICE0_SASSATA_FLAGS_48BIT_LBA (0x0010)
+#define MPI3_DEVICE0_SASSATA_FLAGS_SMART_SUPP (0x0008)
+#define MPI3_DEVICE0_SASSATA_FLAGS_NCQ_SUPP (0x0004)
+#define MPI3_DEVICE0_SASSATA_FLAGS_FUA_SUPP (0x0002)
+#define MPI3_DEVICE0_SASSATA_FLAGS_PERSIST_CAP (0x0001)
+struct mpi3_device0_pcie_format {
+ u8 supported_link_rates;
+ u8 max_port_width;
+ u8 negotiated_port_width;
+ u8 negotiated_link_rate;
+ u8 port_num;
+ u8 controller_reset_to;
+ __le16 device_info;
+ __le32 maximum_data_transfer_size;
+ __le32 capabilities;
+ __le16 noiob;
+ u8 nv_me_abort_to;
+ u8 page_size;
+ __le16 shutdown_latency;
+ __le16 reserved16;
+};
+
+#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_16_0_SUPP (0x08)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
+#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
+#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
+#define MPI3_DEVICE0_PCIE_CAP_NVME_SGL_ENABLED (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
+struct mpi3_device0_vd_format {
+ u8 vd_state;
+ u8 raid_level;
+ __le16 device_info;
+ __le16 flags;
+ __le16 reserved06;
+ __le32 reserved08[2];
+};
+
+#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
+#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
+#define MPI3_DEVICE0_VD_STATE_DEGRADED (0x02)
+#define MPI3_DEVICE0_VD_STATE_OPTIMAL (0x03)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_0 (0)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_1 (1)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_5 (5)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_6 (6)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_10 (10)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_50 (50)
+#define MPI3_DEVICE0_VD_RAIDLEVEL_RAID_60 (60)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_HDD (0x0010)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SSD (0x0008)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_NVME (0x0004)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
+#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_MASK (0x0003)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_NONE (0x0000)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_HOST (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_IOC (0x0002)
+union mpi3_device0_dev_spec_format {
+ struct mpi3_device0_sas_sata_format sas_sata_format;
+ struct mpi3_device0_pcie_format pcie_format;
+ struct mpi3_device0_vd_format vd_format;
+};
+
+struct mpi3_device_page0 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 parent_dev_handle;
+ __le16 slot;
+ __le16 enclosure_handle;
+ __le64 wwid;
+ __le16 persistent_id;
+ u8 io_unit_port;
+ u8 access_status;
+ __le16 flags;
+ __le16 reserved1e;
+ __le16 slot_index;
+ __le16 queue_depth;
+ u8 reserved24[3];
+ u8 device_form;
+ union mpi3_device0_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE0_PAGEVERSION (0x00)
+#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
+#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
+#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
+#define MPI3_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_DEVICE0_ASTATUS_CAP_UNSUPPORTED (0x02)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
+#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
+#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
+#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
+#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
+#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
+#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
+#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
+#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
+#define MPI3_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x23)
+#define MPI3_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x24)
+#define MPI3_DEVICE0_ASTATUS_SIF_PIO_SN (0x25)
+#define MPI3_DEVICE0_ASTATUS_SIF_MDMA_SN (0x26)
+#define MPI3_DEVICE0_ASTATUS_SIF_UDMA_SN (0x27)
+#define MPI3_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x28)
+#define MPI3_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x29)
+#define MPI3_DEVICE0_ASTATUS_SIF_MAX (0x2f)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNKNOWN (0x30)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
+#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
+#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
+#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
+#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDENTIFY_FAILED (0x43)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCONFIG_FAILED (0x44)
+#define MPI3_DEVICE0_ASTATUS_NVME_QCREATION_FAILED (0x45)
+#define MPI3_DEVICE0_ASTATUS_NVME_EVENTCFG_FAILED (0x46)
+#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
+#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
+#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
+#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x50)
+#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
+#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_NOT_DIR_ATTACHED (0x0000)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL (0x0004)
+#define MPI3_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+#define MPI3_DEVICE0_QUEUE_DEPTH_NOT_APPLICABLE (0x0000)
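
As a sketch of how Device Page 0 is typically consumed (not part of the patch; the helper name is illustrative, and it assumes the caller already knows the device_specific union holds the PCIe format), a reader might decode the flags word and the PCIe device-info type with the masks above:

/* Illustrative only. Assumes <linux/types.h> and the definitions above. */
static bool mpi3_dev_pg0_is_nvme(const struct mpi3_device_page0 *pg0)
{
	u16 flags = le16_to_cpu(pg0->flags);
	u16 pcie_info;

	if (!(flags & MPI3_DEVICE0_FLAGS_DEVICE_PRESENT))
		return false;

	/* Assumes device_form already selected the PCIe variant. */
	pcie_info = le16_to_cpu(pg0->device_specific.pcie_format.device_info);
	return (pcie_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
	       MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE;
}
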
+struct mpi3_device1_sas_sata_format {
+ __le32 reserved00;
+};
+
+struct mpi3_device1_pcie_format {
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved08;
+ u8 revision_id;
+ u8 reserved0d;
+ __le16 pci_parameters;
+};
+
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_128B (0x0)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_256B (0x1)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_512B (0x2)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_1024B (0x3)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_2048B (0x4)
+#define MPI3_DEVICE1_PCIE_PARAMS_DATA_SIZE_4096B (0x5)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_MASK (0x01c0)
+#define MPI3_DEVICE1_PCIE_PARAMS_MAX_READ_REQ_SHIFT (6)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_MASK (0x0038)
+#define MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_SHIFT (3)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_MASK (0x0007)
+#define MPI3_DEVICE1_PCIE_PARAMS_SUPP_MAX_PAYLOAD_SHIFT (0)
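
The *_DATA_SIZE_* codes above encode payload sizes as 128 bytes shifted left by the code (0x0 = 128B ... 0x5 = 4096B). A minimal decode of the current maximum payload, not part of the patch and with an illustrative helper name, looks like this:

/* Illustrative only. Decodes pci_parameters using the masks above. */
static u32 mpi3_dev_pg1_curr_max_payload(const struct mpi3_device1_pcie_format *fmt)
{
	u16 params = le16_to_cpu(fmt->pci_parameters);
	u16 code = (params & MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_MASK) >>
		   MPI3_DEVICE1_PCIE_PARAMS_CURR_MAX_PAYLOAD_SHIFT;

	return 128U << code;	/* 0x0 -> 128B ... 0x5 -> 4096B */
}
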
+struct mpi3_device1_vd_format {
+ __le32 reserved00;
+};
+
+union mpi3_device1_dev_spec_format {
+ struct mpi3_device1_sas_sata_format sas_sata_format;
+ struct mpi3_device1_pcie_format pcie_format;
+ struct mpi3_device1_vd_format vd_format;
+};
+
+struct mpi3_device_page1 {
+ struct mpi3_config_page_header header;
+ __le16 dev_handle;
+ __le16 reserved0a;
+ __le32 reserved0c[12];
+ u8 reserved3c[3];
+ u8 device_form;
+ union mpi3_device1_dev_spec_format device_specific;
+};
+
+#define MPI3_DEVICE1_PAGEVERSION (0x00)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
new file mode 100644
index 000000000000..169e4f9b7b7c
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2018-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_IMAGE_H
+#define MPI30_IMAGE_H 1
+struct mpi3_comp_image_version {
+ __le16 build_num;
+ __le16 customer_id;
+ u8 phase_minor;
+ u8 phase_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+struct mpi3_hash_exclusion_format {
+ __le32 offset;
+ __le32 size;
+};
+
+#define MPI3_IMAGE_HASH_EXCUSION_NUM (4)
+struct mpi3_component_image_header {
+ __le32 signature0;
+ __le32 load_address;
+ __le32 data_size;
+ __le32 start_offset;
+ __le32 signature1;
+ __le32 flash_offset;
+ __le32 image_size;
+ __le32 version_string_offset;
+ __le32 build_date_string_offset;
+ __le32 build_time_string_offset;
+ __le32 environment_variable_offset;
+ __le32 application_specific;
+ __le32 signature2;
+ __le32 header_size;
+ __le32 crc;
+ __le32 flags;
+ __le32 secondary_flash_offset;
+ __le32 etp_offset;
+ __le32 etp_size;
+ union mpi3_version_union rmc_interface_version;
+ union mpi3_version_union etp_interface_version;
+ struct mpi3_comp_image_version component_image_version;
+ struct mpi3_hash_exclusion_format hash_exclusion[MPI3_IMAGE_HASH_EXCUSION_NUM];
+ __le32 next_image_header_offset;
+ union mpi3_version_union security_version;
+ __le32 reserved84[31];
+};
+
+#define MPI3_IMAGE_HEADER_SIGNATURE0_MPI3 (0xeb00003e)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_INVALID (0x00000000)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_FIRST_MUTABLE (0x20434d46)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_BSP (0x20505342)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_ROM_BIOS (0x534f4942)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_X64 (0x4d494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_HII_ARM (0x41494948)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_CPLD (0x444c5043)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
+#define MPI3_IMAGE_HEADER_FLAGS_REQUIRES_ACTIVATION (0x00000004)
+#define MPI3_IMAGE_HEADER_FLAGS_COMPRESSED (0x00000002)
+#define MPI3_IMAGE_HEADER_FLAGS_FLASH (0x00000001)
+#define MPI3_IMAGE_HEADER_SIGNATURE0_OFFSET (0x00)
+#define MPI3_IMAGE_HEADER_LOAD_ADDRESS_OFFSET (0x04)
+#define MPI3_IMAGE_HEADER_DATA_SIZE_OFFSET (0x08)
+#define MPI3_IMAGE_HEADER_START_OFFSET_OFFSET (0x0c)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OFFSET (0x10)
+#define MPI3_IMAGE_HEADER_FLASH_OFFSET_OFFSET (0x14)
+#define MPI3_IMAGE_HEADER_FLASH_SIZE_OFFSET (0x18)
+#define MPI3_IMAGE_HEADER_VERSION_STRING_OFFSET_OFFSET (0x1c)
+#define MPI3_IMAGE_HEADER_BUILD_DATE_STRING_OFFSET_OFFSET (0x20)
+#define MPI3_IMAGE_HEADER_BUILD_TIME_OFFSET_OFFSET (0x24)
+#define MPI3_IMAGE_HEADER_ENVIROMENT_VAR_OFFSET_OFFSET (0x28)
+#define MPI3_IMAGE_HEADER_APPLICATION_SPECIFIC_OFFSET (0x2c)
+#define MPI3_IMAGE_HEADER_SIGNATURE2_OFFSET (0x30)
+#define MPI3_IMAGE_HEADER_HEADER_SIZE_OFFSET (0x34)
+#define MPI3_IMAGE_HEADER_CRC_OFFSET (0x38)
+#define MPI3_IMAGE_HEADER_FLAGS_OFFSET (0x3c)
+#define MPI3_IMAGE_HEADER_SECONDARY_FLASH_OFFSET_OFFSET (0x40)
+#define MPI3_IMAGE_HEADER_ETP_OFFSET_OFFSET (0x44)
+#define MPI3_IMAGE_HEADER_ETP_SIZE_OFFSET (0x48)
+#define MPI3_IMAGE_HEADER_RMC_INTERFACE_VER_OFFSET (0x4c)
+#define MPI3_IMAGE_HEADER_ETP_INTERFACE_VER_OFFSET (0x50)
+#define MPI3_IMAGE_HEADER_COMPONENT_IMAGE_VER_OFFSET (0x54)
+#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
+#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
+#define MPI3_IMAGE_HEADER_SIZE (0x100)
+struct mpi3_extended_image_header {
+ u8 image_type;
+ u8 reserved01[3];
+ __le32 checksum;
+ __le32 image_size;
+ __le32 next_image_header_offset;
+ __le32 reserved10[4];
+ __le32 identify_string[8];
+};
+
+#define MPI3_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI3_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI3_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0c)
+#define MPI3_EXT_IMAGE_HEADER_SIZE (0x40)
+#define MPI3_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI3_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI3_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI3_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
+#define MPI3_EXT_IMAGE_TYPE_RDE (0x0a)
+#define MPI3_EXT_IMAGE_TYPE_AUXILIARY_PROCESSOR (0x0b)
+#define MPI3_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI3_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xff)
+struct mpi3_supported_device {
+ __le16 device_id;
+ __le16 vendor_id;
+ __le16 device_id_mask;
+ __le16 reserved06;
+ u8 low_pci_rev;
+ u8 high_pci_rev;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#ifndef MPI3_SUPPORTED_DEVICE_MAX
+#define MPI3_SUPPORTED_DEVICE_MAX (1)
+#endif
+struct mpi3_supported_devices_data {
+ u8 image_version;
+ u8 reserved01;
+ u8 num_devices;
+ u8 reserved03;
+ __le32 reserved04;
+ struct mpi3_supported_device supported_device[MPI3_SUPPORTED_DEVICE_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_MAX
+#define MPI3_ENCRYPTED_HASH_MAX (1)
+#endif
+struct mpi3_encrypted_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
+};
+
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
+#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
+#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
+#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA1024 (0x03)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA2048 (0x04)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA4096 (0x05)
+#define MPI3_ENCRYPTION_ALGORITHM_RSA3072 (0x06)
+#ifndef MPI3_PUBLIC_KEY_MAX
+#define MPI3_PUBLIC_KEY_MAX (1)
+#endif
+struct mpi3_encrypted_key_with_hash_entry {
+ u8 hash_image_type;
+ u8 hash_algorithm;
+ u8 encryption_algorithm;
+ u8 reserved03;
+ __le32 reserved04;
+ __le32 public_key[MPI3_PUBLIC_KEY_MAX];
+ __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
+};
+
+#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
+#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
+#endif
+struct mpi3_encrypted_hash_data {
+ u8 image_version;
+ u8 num_hash;
+ __le16 reserved02;
+ __le32 reserved04;
+ struct mpi3_encrypted_hash_entry encrypted_hash_entry[MPI3_ENCRYPTED_HASH_ENTRY_MAX];
+};
+
+#ifndef MPI3_AUX_PROC_DATA_MAX
+#define MPI3_AUX_PROC_DATA_MAX (1)
+#endif
+struct mpi3_aux_processor_data {
+ u8 boot_method;
+ u8 num_load_addr;
+ u8 reserved02;
+ u8 type;
+ __le32 version;
+ __le32 load_address[8];
+ __le32 reserved28[22];
+ __le32 aux_processor_data[MPI3_AUX_PROC_DATA_MAX];
+};
+
+#define MPI3_AUX_PROC_DATA_OFFSET (0x80)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_MSG (0x00)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_MO_DOORBELL (0x01)
+#define MPI3_AUXPROCESSOR_BOOT_METHOD_COMPONENT (0x02)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_A15 (0x00)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_M0 (0x01)
+#define MPI3_AUXPROCESSOR_TYPE_ARM_R4 (0x02)
+#endif
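
A flash image built from these structures is a chain of component image headers linked by next_image_header_offset. The following sketch (not part of the patch; the zero-offset termination check is an assumption) counts the headers in an in-memory copy of an image:

/* Illustrative only. Assumes <linux/types.h>; unaligned access is ignored
 * for brevity.
 */
static int mpi3_count_component_images(const u8 *image, u32 image_len)
{
	u32 offset = 0;
	int count = 0;

	while (offset + sizeof(struct mpi3_component_image_header) <= image_len) {
		const struct mpi3_component_image_header *hdr =
			(const void *)(image + offset);

		if (le32_to_cpu(hdr->signature0) != MPI3_IMAGE_HEADER_SIGNATURE0_MPI3)
			break;
		count++;
		offset = le32_to_cpu(hdr->next_image_header_offset);
		if (!offset)	/* assumed chain terminator for this sketch */
			break;
	}
	return count;
}
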
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
new file mode 100644
index 000000000000..e02b6d3cfba2
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_INIT_H
+#define MPI30_INIT_H 1
+struct mpi3_scsi_io_cdb_eedp32 {
+ u8 cdb[20];
+ __be32 primary_reference_tag;
+ __le16 primary_application_tag;
+ __le16 primary_application_tag_mask;
+ __le32 transfer_length;
+};
+
+union mpi3_scso_io_cdb_union {
+ u8 cdb32[32];
+ struct mpi3_scsi_io_cdb_eedp32 eedp32;
+ struct mpi3_sge_common sge;
+};
+
+struct mpi3_scsi_io_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le32 flags;
+ __le32 skip_count;
+ __le32 data_length;
+ u8 lun[8];
+ union mpi3_scso_io_cdb_union cdb;
+ union mpi3_sge_union sgl[4];
+};
+
+#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
+#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
+#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
+#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
+#define MPI3_SCSIIO_METASGL_INDEX (3)
+struct mpi3_scsi_io_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 scsi_status;
+ u8 scsi_state;
+ __le16 dev_handle;
+ __le32 transfer_count;
+ __le32 sense_count;
+ __le32 response_data;
+ __le16 task_tag;
+ __le16 scsi_status_qualifier;
+ __le32 eedp_error_offset;
+ __le16 eedp_observed_app_tag;
+ __le16 eedp_observed_guard;
+ __le32 eedp_observed_ref_tag;
+ __le64 sense_data_buffer_address;
+};
+
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x02)
+#define MPI3_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x04)
+#define MPI3_SCSI_STATUS_GOOD (0x00)
+#define MPI3_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI3_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI3_SCSI_STATUS_BUSY (0x08)
+#define MPI3_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI3_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI3_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI3_SCSI_STATUS_COMMAND_TERMINATED (0x22)
+#define MPI3_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
+#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
+#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
+#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
+#define MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE (0x03)
+#define MPI3_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI3_SCSI_STATE_TERMINATED (0x08)
+#define MPI3_SCSI_STATE_RESPONSE_DATA_VALID (0x10)
+#define MPI3_SCSI_RSP_RESPONSECODE_MASK (0x000000ff)
+#define MPI3_SCSI_RSP_RESPONSECODE_SHIFT (0)
+#define MPI3_SCSI_RSP_ARI2_MASK (0x0000ff00)
+#define MPI3_SCSI_RSP_ARI2_SHIFT (8)
+#define MPI3_SCSI_RSP_ARI1_MASK (0x00ff0000)
+#define MPI3_SCSI_RSP_ARI1_SHIFT (16)
+#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000)
+#define MPI3_SCSI_RSP_ARI0_SHIFT (24)
+#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff)
+struct mpi3_scsi_task_mgmt_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 task_host_tag;
+ u8 task_type;
+ u8 reserved0f;
+ __le16 task_request_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ u8 lun[8];
+};
+
+#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02)
+#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09)
+#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a)
+#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b)
+struct mpi3_scsi_task_mgmt_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 termination_count;
+ __le32 response_data;
+ __le32 reserved18;
+};
+
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#endif
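
The scsi_state and scsi_status fields of struct mpi3_scsi_io_reply decompose with the masks above; for example, sense data is only meaningful when the sense-state bits report it as valid. A minimal check (not part of the patch; helper name is illustrative):

/* Illustrative only. Uses only the reply fields and masks defined above. */
static bool mpi3_reply_has_valid_sense(const struct mpi3_scsi_io_reply *reply)
{
	u8 sense_state = reply->scsi_state & MPI3_SCSI_STATE_SENSE_MASK;

	return reply->scsi_status == MPI3_SCSI_STATUS_CHECK_CONDITION &&
	       sense_state == MPI3_SCSI_STATE_SENSE_VALID;
}
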
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
new file mode 100644
index 000000000000..1af99a5382d5
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -0,0 +1,1004 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_IOC_H
+#define MPI30_IOC_H 1
+struct mpi3_ioc_init_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ union mpi3_version_union mpi_version;
+ __le64 time_stamp;
+ u8 reserved18;
+ u8 who_init;
+ __le16 reserved1a;
+ __le16 reply_free_queue_depth;
+ __le16 reserved1e;
+ __le64 reply_free_queue_address;
+ __le32 reserved28;
+ __le16 sense_buffer_free_queue_depth;
+ __le16 sense_buffer_length;
+ __le64 sense_buffer_free_queue_address;
+ __le64 driver_information_address;
+};
+
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
+struct mpi3_driver_info_layout {
+ __le32 information_length;
+ u8 driver_signature[12];
+ u8 os_name[16];
+ u8 os_version[12];
+ u8 driver_name[20];
+ u8 driver_version[32];
+ u8 driver_release_date[20];
+ __le32 driver_capabilities;
+};
+
+struct mpi3_ioc_facts_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_ioc_facts_data {
+ __le16 ioc_facts_data_length;
+ __le16 reserved02;
+ union mpi3_version_union mpi_version;
+ struct mpi3_comp_image_version fw_version;
+ __le32 ioc_capabilities;
+ u8 ioc_number;
+ u8 who_init;
+ __le16 max_msix_vectors;
+ __le16 max_outstanding_request;
+ __le16 product_id;
+ __le16 ioc_request_frame_size;
+ __le16 reply_frame_size;
+ __le16 ioc_exceptions;
+ __le16 max_persistent_id;
+ u8 sge_modifier_mask;
+ u8 sge_modifier_value;
+ u8 sge_modifier_shift;
+ u8 protocol_flags;
+ __le16 max_sas_initiators;
+ __le16 max_sas_targets;
+ __le16 max_sas_expanders;
+ __le16 max_enclosures;
+ __le16 min_dev_handle;
+ __le16 max_dev_handle;
+ __le16 max_pc_ie_switches;
+ __le16 max_nvme;
+ __le16 max_pds;
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_advanced_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_posted_cmd_buffers;
+ __le32 flags;
+ __le16 max_operational_request_queues;
+ __le16 max_operational_reply_queues;
+ __le16 shutdown_timeout;
+ __le16 reserved4e;
+ __le32 diag_trace_size;
+ __le32 diag_fw_size;
+};
+
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_GRAN_MASK (0x00000001)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_IOC_GRAN (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_REPLY_Q_GRAN (0x00000001)
+#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
+#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
+#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
+#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
+#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
+#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
+#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_RAID (0x0100)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0200)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_RAID (0x0300)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
+#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
+#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
+#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
+#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
+#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
+#define MPI3_IOCFACTS_PROTOCOL_SATA (0x0008)
+#define MPI3_IOCFACTS_PROTOCOL_NVME (0x0004)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+#define MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
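
The IOCFacts flags word carries an encoded DMA address width; this sketch (not part of the patch) extracts it with the mask and shift above, treating the field as a bit count suitable for DMA_BIT_MASK(), which is an assumption of the sketch:

/* Illustrative only. */
static u8 mpi3_iocfacts_dma_width(const struct mpi3_ioc_facts_data *facts)
{
	u32 flags = le32_to_cpu(facts->flags);

	return (flags & MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	       MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
}
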
+struct mpi3_mgmt_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 reserved0c[5];
+ union mpi3_sge_union command_sgl;
+ union mpi3_sge_union response_sgl;
+};
+
+struct mpi3_create_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 burst;
+ __le16 size;
+ __le16 queue_id;
+ __le16 reply_queue_id;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+struct mpi3_delete_request_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_create_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 flags;
+ u8 reserved0b;
+ __le16 size;
+ __le16 queue_id;
+ __le16 msix_index;
+ __le16 reserved12;
+ __le32 reserved14;
+ __le64 base_address;
+};
+
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
+struct mpi3_delete_reply_queue_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 queue_id;
+};
+
+struct mpi3_port_enable_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+};
+
+#define MPI3_EVENT_LOG_DATA (0x01)
+#define MPI3_EVENT_CHANGE (0x02)
+#define MPI3_EVENT_GPIO_INTERRUPT (0x04)
+#define MPI3_EVENT_TEMP_THRESHOLD (0x05)
+#define MPI3_EVENT_CABLE_MGMT (0x06)
+#define MPI3_EVENT_DEVICE_ADDED (0x07)
+#define MPI3_EVENT_DEVICE_INFO_CHANGED (0x08)
+#define MPI3_EVENT_PREPARE_FOR_RESET (0x09)
+#define MPI3_EVENT_COMP_IMAGE_ACT_START (0x0a)
+#define MPI3_EVENT_ENCL_DEVICE_ADDED (0x0b)
+#define MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x0c)
+#define MPI3_EVENT_DEVICE_STATUS_CHANGE (0x0d)
+#define MPI3_EVENT_ENERGY_PACK_CHANGE (0x0e)
+#define MPI3_EVENT_SAS_DISCOVERY (0x11)
+#define MPI3_EVENT_SAS_BROADCAST_PRIMITIVE (0x12)
+#define MPI3_EVENT_SAS_NOTIFY_PRIMITIVE (0x13)
+#define MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x14)
+#define MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW (0x15)
+#define MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x16)
+#define MPI3_EVENT_SAS_PHY_COUNTER (0x18)
+#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
+#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
+#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
+#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
+#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
+#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
+#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+struct mpi3_event_notification_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le16 sas_broadcast_primitive_masks;
+ __le16 sas_notify_primitive_masks;
+ __le32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+struct mpi3_event_notification_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 event_data_length;
+ u8 event;
+ __le16 ioc_change_count;
+ __le32 event_context;
+ __le32 event_data[1];
+};
+
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
+struct mpi3_event_data_gpio_interrupt {
+ u8 gpio_num;
+ u8 reserved01[3];
+};
+
+struct mpi3_event_data_temp_threshold {
+ __le16 status;
+ u8 sensor_num;
+ u8 reserved03;
+ __le16 current_temperature;
+ __le16 reserved06;
+ __le32 reserved08;
+ __le32 reserved0c;
+};
+
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD3_EXCEEDED (0x0008)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD2_EXCEEDED (0x0004)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD1_EXCEEDED (0x0002)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD0_EXCEEDED (0x0001)
+struct mpi3_event_data_cable_management {
+ __le32 active_cable_power_requirement;
+ u8 status;
+ u8 receptacle_id;
+ __le16 reserved06;
+};
+
+#define MPI3_EVENT_CABLE_MGMT_ACT_CABLE_PWR_INVALID (0xffffffff)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER (0x00)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_PRESENT (0x01)
+#define MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED (0x02)
+struct mpi3_event_ack_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ u8 event;
+ u8 reserved0d[3];
+ __le32 event_context;
+};
+
+struct mpi3_event_data_prepare_for_reset {
+ u8 reason_code;
+ u8 reserved01;
+ __le16 reserved02;
+};
+
+#define MPI3_EVENT_PREPARE_RESET_RC_START (0x01)
+#define MPI3_EVENT_PREPARE_RESET_RC_ABORT (0x02)
+struct mpi3_event_data_comp_image_activation {
+ __le32 reserved00;
+};
+
+struct mpi3_event_data_device_status_change {
+ __le16 task_tag;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 parent_dev_handle;
+ __le16 dev_handle;
+ __le64 wwid;
+ u8 lun[8];
+};
+
+#define MPI3_EVENT_DEV_STAT_RC_MOVED (0x01)
+#define MPI3_EVENT_DEV_STAT_RC_HIDDEN (0x02)
+#define MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN (0x03)
+#define MPI3_EVENT_DEV_STAT_RC_ASYNC_NOTIFICATION (0x04)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT (0x20)
+#define MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP (0x21)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_STRT (0x22)
+#define MPI3_EVENT_DEV_STAT_RC_INT_TASK_ABORT_CMP (0x23)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT (0x24)
+#define MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP (0x25)
+#define MPI3_EVENT_DEV_STAT_RC_PCIE_HOT_RESET_FAILED (0x30)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_STRT (0x40)
+#define MPI3_EVENT_DEV_STAT_RC_EXPANDER_REDUCED_FUNC_CMP (0x41)
+#define MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING (0x50)
+struct mpi3_event_data_energy_pack_change {
+ __le32 reserved00;
+ __le16 shutdown_timeout;
+ __le16 reserved06;
+};
+
+struct mpi3_event_data_sas_discovery {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 discovery_status;
+};
+
+#define MPI3_EVENT_SAS_DISC_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_SAS_DISC_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI3_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+#define MPI3_SAS_DISC_STATUS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
+#define MPI3_SAS_DISC_STATUS_SLOT_COUNT_MISMATCH (0x00002000)
+#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
+#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
+#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
+#define MPI3_SAS_DISC_STATUS_TABLE_TO_SUBTRACTIVE_LINK (0x00000200)
+#define MPI3_SAS_DISC_STATUS_UNSUPPORTED_DEVICE (0x00000100)
+#define MPI3_SAS_DISC_STATUS_TABLE_LINK (0x00000080)
+#define MPI3_SAS_DISC_STATUS_SUBTRACTIVE_LINK (0x00000040)
+#define MPI3_SAS_DISC_STATUS_SMP_CRC_ERROR (0x00000020)
+#define MPI3_SAS_DISC_STATUS_SMP_FUNCTION_FAILED (0x00000010)
+#define MPI3_SAS_DISC_STATUS_SMP_TIMEOUT (0x00000008)
+#define MPI3_SAS_DISC_STATUS_MULTIPLE_PORTS (0x00000004)
+#define MPI3_SAS_DISC_STATUS_INVALID_SAS_ADDRESS (0x00000002)
+#define MPI3_SAS_DISC_STATUS_LOOP_DETECTED (0x00000001)
+struct mpi3_event_data_sas_broadcast_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 port_width;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE (0x01)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_SES (0x02)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_EXPANDER (0x03)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED3 (0x05)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_RESERVED4 (0x06)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI3_EVENT_BROADCAST_PRIMITIVE_CHANGE1_RESERVED (0x08)
+struct mpi3_event_data_sas_notify_primitive {
+ u8 phy_num;
+ u8 io_unit_port;
+ u8 reserved02;
+ u8 primitive;
+};
+
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_ENABLE_SPINUP (0x01)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_POWER_LOSS_EXPECTED (0x02)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED1 (0x03)
+#define MPI3_EVENT_NOTIFY_PRIMITIVE_RESERVED2 (0x04)
+#ifndef MPI3_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI3_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+struct mpi3_event_sas_topo_phy_entry {
+ __le16 attached_dev_handle;
+ u8 link_rate;
+ u8 status;
+};
+
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xf0)
+#define MPI3_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+#define MPI3_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI3_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI3_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI3_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI3_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI3_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI3_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0a)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0b)
+#define MPI3_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0c)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_MASK (0xc0)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_SHIFT (6)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_ACCESSIBLE (0x00)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
+#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING (0x06)
+struct mpi3_event_data_sas_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 expander_dev_handle;
+ u8 num_phys;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_phy_num;
+ u8 exp_status;
+ u8 io_unit_port;
+ struct mpi3_event_sas_topo_phy_entry phy_entry[MPI3_EVENT_SAS_TOPO_PHY_COUNT];
+};
+
+#define MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_sas_phy_counter {
+ __le64 time_stamp;
+ __le32 reserved08;
+ u8 phy_event_code;
+ u8 phy_num;
+ __le16 reserved0e;
+ __le32 phy_event_info;
+ u8 counter_type;
+ u8 threshold_window;
+ u8 time_units;
+ u8 reserved17;
+ __le32 event_threshold;
+ __le16 threshold_flags;
+ __le16 reserved1e;
+};
+
+struct mpi3_event_data_sas_device_disc_err {
+ __le16 dev_handle;
+ u8 reason_code;
+ u8 io_unit_port;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_FAILED (0x01)
+#define MPI3_EVENT_SAS_DISC_ERR_RC_SMP_TIMEOUT (0x02)
+struct mpi3_event_data_pcie_enumeration {
+ u8 flags;
+ u8 reason_code;
+ u8 io_unit_port;
+ u8 reserved03;
+ __le32 enumeration_status;
+};
+
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_DEVICE_CHANGE (0x02)
+#define MPI3_EVENT_PCIE_ENUM_FLAGS_IN_PROGRESS (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_STARTED (0x01)
+#define MPI3_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCH_DEPTH_EXCEED (0x80000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI3_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
+#ifndef MPI3_EVENT_PCIE_TOPO_PORT_COUNT
+#define MPI3_EVENT_PCIE_TOPO_PORT_COUNT (1)
+#endif
+struct mpi3_event_pcie_topo_port_entry {
+ __le16 attached_dev_handle;
+ u8 port_status;
+ u8 reserved03;
+ u8 current_port_info;
+ u8 reserved05;
+ u8 previous_port_info;
+ u8 reserved07;
+};
+
+#define MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_4 (0x30)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_32_0 (0x06)
+struct mpi3_event_data_pcie_topology_change_list {
+ __le16 enclosure_handle;
+ __le16 switch_dev_handle;
+ u8 num_ports;
+ u8 reserved05[3];
+ u8 num_entries;
+ u8 start_port_num;
+ u8 switch_status;
+ u8 io_unit_port;
+ __le32 reserved0c;
+ struct mpi3_event_pcie_topo_port_entry port_entry[MPI3_EVENT_PCIE_TOPO_PORT_COUNT];
+};
+
+#define MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
+#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
+#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
+#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_sas_init_dev_status_change {
+ u8 reason_code;
+ u8 io_unit_port;
+ __le16 dev_handle;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+#define MPI3_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI3_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+struct mpi3_event_data_sas_init_table_overflow {
+ __le16 max_init;
+ __le16 current_init;
+ __le32 reserved04;
+ __le64 sas_address;
+};
+
+struct mpi3_event_data_hard_reset_received {
+ u8 reserved00;
+ u8 io_unit_port;
+ __le16 reserved02;
+};
+
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
+#define MPI3_PEL_CLEARTYPE_CLEAR (0x00)
+#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00)
+#define MPI3_PEL_ACTION_GET_SEQNUM (0x01)
+#define MPI3_PEL_ACTION_MARK_CLEAR (0x02)
+#define MPI3_PEL_ACTION_GET_LOG (0x03)
+#define MPI3_PEL_ACTION_GET_COUNT (0x04)
+#define MPI3_PEL_ACTION_WAIT (0x05)
+#define MPI3_PEL_ACTION_ABORT (0x06)
+#define MPI3_PEL_ACTION_GET_PRINT_STRINGS (0x07)
+#define MPI3_PEL_ACTION_ACKNOWLEDGE (0x08)
+#define MPI3_PEL_STATUS_SUCCESS (0x00)
+#define MPI3_PEL_STATUS_NOT_FOUND (0x01)
+#define MPI3_PEL_STATUS_ABORTED (0x02)
+#define MPI3_PEL_STATUS_NOT_READY (0x03)
+struct mpi3_pel_seq {
+ __le32 newest;
+ __le32 oldest;
+ __le32 clear;
+ __le32 shutdown;
+ __le32 boot;
+ __le32 last_acknowledged;
+};
+
+struct mpi3_pel_entry {
+ __le32 sequence_number;
+ __le32 time_stamp[2];
+ __le16 log_code;
+ __le16 arg_type;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ u8 ext_num;
+ u8 num_exts;
+ u8 arg_data_size;
+ u8 fixed_format_size;
+ __le32 reserved18[2];
+ __le32 pel_info[24];
+};
+
+struct mpi3_pel_list {
+ __le32 log_count;
+ __le32 reserved04;
+ struct mpi3_pel_entry entry[1];
+};
+
+struct mpi3_pel_arg_map {
+ u8 arg_type;
+ u8 length;
+ __le16 start_location;
+};
+
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_APPEND_STRING (0x00)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_INTEGER (0x01)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_STRING (0x02)
+#define MPI3_PEL_ARG_MAP_ARG_TYPE_BIT_FIELD (0x03)
+struct mpi3_pel_print_string {
+ __le16 log_code;
+ __le16 string_length;
+ u8 num_arg_map;
+ u8 reserved05[3];
+ struct mpi3_pel_arg_map arg_map[1];
+};
+
+struct mpi3_pel_print_string_list {
+ __le32 num_print_strings;
+ __le32 residual_bytes_remain;
+ __le32 reserved08[2];
+ struct mpi3_pel_print_string print_string[1];
+};
+
+#ifndef MPI3_PEL_ACTION_SPECIFIC_MAX
+#define MPI3_PEL_ACTION_SPECIFIC_MAX (1)
+#endif
+struct mpi3_pel_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 action_specific[MPI3_PEL_ACTION_SPECIFIC_MAX];
+};
+
+struct mpi3_pel_req_action_get_sequence_numbers {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c[5];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_clear_log_marker {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ u8 clear_type;
+ u8 reserved0d[3];
+};
+
+struct mpi3_pel_req_action_get_log {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_get_count {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_wait {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 starting_sequence_number;
+ __le16 locale;
+ u8 class;
+ u8 reserved13;
+ __le16 wait_time;
+ __le16 reserved16;
+ __le32 reserved18[2];
+};
+
+struct mpi3_pel_req_action_abort {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 abort_host_tag;
+ __le16 reserved12;
+ __le32 reserved14;
+};
+
+struct mpi3_pel_req_action_get_print_strings {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 reserved0c;
+ __le16 start_log_code;
+ __le16 reserved12;
+ __le32 reserved14[3];
+ union mpi3_sge_union sgl;
+};
+
+struct mpi3_pel_req_action_acknowledge {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 sequence_number;
+ __le32 reserved10;
+};
+
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT (0x01)
+struct mpi3_pel_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 action;
+ u8 reserved11;
+ __le16 reserved12;
+ __le16 pe_log_status;
+ __le16 reserved16;
+ __le32 transfer_length;
+};
+
+struct mpi3_ci_download_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 action;
+ u8 reserved0b;
+ __le32 signature1;
+ __le32 total_image_size;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_LAST_SEGMENT (0x80)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_DOWNLOAD (0x01)
+#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
+#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
+#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
+struct mpi3_ci_download_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ u8 flags;
+ u8 cache_dirty;
+ u8 pending_count;
+ u8 reserved13;
+};
+
+#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_OFFLINE_PENDING (0x06)
+#define MPI3_CI_DOWNLOAD_FLAGS_COMPATIBLE (0x01)
+struct mpi3_ci_upload_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 reserved0a;
+ __le32 signature1;
+ __le32 reserved10;
+ __le32 image_offset;
+ __le32 segment_size;
+ __le32 reserved1c;
+ union mpi3_sge_union sgl;
+};
+
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
+#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING (0x02)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP (0x04)
+#define MPI3_CTRL_OP_GET_TIMESTAMP (0x05)
+#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
+#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
+#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
+#define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20)
+#define MPI3_CTRL_OP_SAS_CLEAR_ERROR_LOG (0x21)
+#define MPI3_CTRL_OP_PCIE_CLEAR_ERROR_LOG (0x22)
+#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
+#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
+#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
+#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
+#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTENT_ID (0x04)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_WWIDADDR_PARAM64_WWID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM16_SLOTNUM_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_ENCLSLOT_PARAM64_ENCLOSURELID_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_SASDEVNAME_PARAM64_DEVNAME_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_DEVH_INDEX (0)
+#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
+#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
+#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
+#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
+#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
+struct mpi3_iounit_control_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 operation;
+ __le32 reserved0c;
+ __le64 param64[2];
+ __le32 param32[4];
+ __le16 param16[4];
+ u8 param8[8];
+};
+
+struct mpi3_iounit_control_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le64 value64[2];
+ __le32 value32[4];
+ __le16 value16[4];
+ u8 value8[8];
+};
+#endif
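
Event numbers index the event_masks[] array of struct mpi3_event_notification_request as (event / 32, event % 32). The sketch below (not part of the patch) assumes the usual MPI convention that a set bit suppresses delivery of the corresponding event, so "unmasking" clears the bit:

/* Illustrative only. Helper name and mask semantics are assumptions. */
static void mpi3_unmask_event(struct mpi3_event_notification_request *req, u8 event)
{
	u32 word = event / 32;
	u32 mask;

	if (word >= MPI3_EVENT_NOTIFY_EVENTMASK_WORDS)
		return;

	mask = le32_to_cpu(req->event_masks[word]);
	mask &= ~(1U << (event % 32));
	req->event_masks[word] = cpu_to_le32(mask);
}

/* e.g. mpi3_unmask_event(&req, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST); */
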
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
new file mode 100644
index 000000000000..ba5018702960
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_SAS_H
+#define MPI30_SAS_H 1
+#define MPI3_SAS_DEVICE_INFO_SSP_TARGET (0x00000100)
+#define MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET (0x00000080)
+#define MPI3_SAS_DEVICE_INFO_SMP_TARGET (0x00000040)
+#define MPI3_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000020)
+#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
+#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)
+struct mpi3_smp_passthrough_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ u8 reserved0a;
+ u8 io_unit_port;
+ __le32 reserved0c[3];
+ __le64 sas_address;
+ struct mpi3_sge_common request_sge;
+ struct mpi3_sge_common response_sge;
+};
+#endif
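
The MPI3_SAS_DEVICE_INFO_* bits describe a device_info value such as the one reported in the Device Page 0 SAS/SATA format. A minimal decode (not part of the patch; helper name is illustrative):

/* Illustrative only. Splits device_info into type and SSP-target bits. */
static bool mpi3_sas_is_ssp_end_device(u32 device_info)
{
	u32 type = device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK;

	return type == MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE &&
	       (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET);
}
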
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
new file mode 100644
index 000000000000..63e4e81d5397
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -0,0 +1,463 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_TRANSPORT_H
+#define MPI30_TRANSPORT_H 1
+struct mpi3_version_struct {
+ u8 dev;
+ u8 unit;
+ u8 minor;
+ u8 major;
+};
+
+union mpi3_version_union {
+ struct mpi3_version_struct mpi3_version;
+ __le32 word;
+};
+
+#define MPI3_VERSION_MAJOR (3)
+#define MPI3_VERSION_MINOR (0)
+#define MPI3_VERSION_UNIT (0)
+#define MPI3_VERSION_DEV (18)
+struct mpi3_sysif_oper_queue_indexes {
+ __le16 producer_index;
+ __le16 reserved02;
+ __le16 consumer_index;
+ __le16 reserved06;
+};
+
+struct mpi3_sysif_registers {
+ __le64 ioc_information;
+ union mpi3_version_union version;
+ __le32 reserved0c[2];
+ __le32 ioc_configuration;
+ __le32 reserved18;
+ __le32 ioc_status;
+ __le32 reserved20;
+ __le32 admin_queue_num_entries;
+ __le64 admin_request_queue_address;
+ __le64 admin_reply_queue_address;
+ __le32 reserved38[2];
+ __le32 coalesce_control;
+ __le32 reserved44[1007];
+ __le16 admin_request_queue_pi;
+ __le16 reserved1002;
+ __le16 admin_reply_queue_ci;
+ __le16 reserved1006;
+ struct mpi3_sysif_oper_queue_indexes oper_queue_indexes[383];
+ __le32 reserved1c00;
+ __le32 write_sequence;
+ __le32 host_diagnostic;
+ __le32 reserved1c0c;
+ __le32 fault;
+ __le32 fault_info[3];
+ __le32 reserved1c20[4];
+ __le64 hcb_address;
+ __le32 hcb_size;
+ __le32 reserved1c3c;
+ __le32 reply_free_host_index;
+ __le32 sense_buffer_free_host_index;
+ __le32 reserved1c48[2];
+ __le64 diag_rw_data;
+ __le64 diag_rw_address;
+ __le16 diag_rw_control;
+ __le16 diag_rw_status;
+ __le32 reserved1c64[35];
+ __le32 scratchpad[4];
+ __le32 reserved1d00[192];
+ __le32 device_assigned_registers[2048];
+};
+
+#define MPI3_SYSIF_IOC_INFO_LOW_OFFSET (0x00000000)
+#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
+#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
+#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
+#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
+#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN (0x00002000)
+#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
+#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
+#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
+#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
+#define MPI3_SYSIF_IOC_STATUS_FAULT (0x00000002)
+#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_LOW_OFFSET (0x00000028)
+#define MPI3_SYSIF_ADMIN_REQ_Q_ADDR_HIGH_OFFSET (0x0000002c)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_LOW_OFFSET (0x00000030)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
+#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x30000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_MASK (0x00ff0000)
+#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_SHIFT (16)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
+#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
+#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_SHIFT (0)
+#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
+#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
+#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
+#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(n) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
+#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(n) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD (0xb)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH (0x2)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH (0x7)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
+#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_FLASH_RCVRY_RESET (0x00000200)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
+#define MPI3_SYSIF_HOST_DIAG_SECURE_BOOT (0x00000040)
+#define MPI3_SYSIF_HOST_DIAG_CLEAR_INVALID_FW_IMAGE (0x00000020)
+#define MPI3_SYSIF_HOST_DIAG_INVALID_FW_IMAGE (0x00000010)
+#define MPI3_SYSIF_HOST_DIAG_HCBENABLE (0x00000008)
+#define MPI3_SYSIF_HOST_DIAG_HCBMODE (0x00000004)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_RW_ENABLE (0x00000002)
+#define MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE (0x00000001)
+#define MPI3_SYSIF_FAULT_OFFSET (0x00001c10)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MASK (0xff000000)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
+#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
+#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
+#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
+#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
+#define MPI3_SYSIF_FAULT_CODE_SAFE_MODE_EXIT (0x0000f004)
+#define MPI3_SYSIF_FAULT_CODE_FACTORY_RESET (0x0000f005)
+#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
+#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
+#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
+#define MPI3_SYSIF_HCB_ADDRESS_LOW_OFFSET (0x00001c30)
+#define MPI3_SYSIF_HCB_ADDRESS_HIGH_OFFSET (0x00001c34)
+#define MPI3_SYSIF_HCB_SIZE_OFFSET (0x00001c38)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_MASK (0xfffff000)
+#define MPI3_SYSIF_HCB_SIZE_SIZE_SHIFT (12)
+#define MPI3_SYSIF_HCB_SIZE_HCDW_ENABLE (0x00000001)
+#define MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET (0x00001c40)
+#define MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET (0x00001c44)
+#define MPI3_SYSIF_DIAG_RW_DATA_LOW_OFFSET (0x00001c50)
+#define MPI3_SYSIF_DIAG_RW_DATA_HIGH_OFFSET (0x00001c54)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_LOW_OFFSET (0x00001c58)
+#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
+#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_PAR_ERR (0x00000006)
+#define MPI3_SYSIF_DIAG_RW_STATUS_BUSY (0x00000001)
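+/*
+ * Illustrative note (editor's sketch): a diagnostic read/write appears
+ * to be performed by programming DIAG_RW_ADDRESS_LOW/HIGH (and
+ * DIAG_RW_DATA_LOW/HIGH for writes), setting the LEN, DIR and START
+ * fields in DIAG_RW_CONTROL, then polling DIAG_RW_STATUS until BUSY
+ * clears and checking the STATUS field for errors.
+ */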
+#define MPI3_SYSIF_SCRATCHPAD0_OFFSET (0x00001cf0)
+#define MPI3_SYSIF_SCRATCHPAD1_OFFSET (0x00001cf4)
+#define MPI3_SYSIF_SCRATCHPAD2_OFFSET (0x00001cf8)
+#define MPI3_SYSIF_SCRATCHPAD3_OFFSET (0x00001cfc)
+#define MPI3_SYSIF_DEVICE_ASSIGNED_REGS_OFFSET (0x00002000)
+#define MPI3_SYSIF_DIAG_SAVE_TIMEOUT (60)
+struct mpi3_default_reply_descriptor {
+ __le32 descriptor_type_dependent1[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 descriptor_type_dependent2;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000)
+struct mpi3_address_reply_descriptor {
+ __le64 reply_frame_address;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 reserved0c;
+ __le16 reply_flags;
+};
+
+struct mpi3_success_reply_descriptor {
+ __le32 reserved00[2];
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+struct mpi3_target_command_buffer_reply_descriptor {
+ __le32 reserved00;
+ __le16 initiator_dev_handle;
+ u8 phy_num;
+ u8 reserved07;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 io_index;
+ __le16 reply_flags;
+};
+
+struct mpi3_status_reply_descriptor {
+ __le16 ioc_status;
+ __le16 reserved02;
+ __le32 ioc_log_info;
+ __le16 request_queue_ci;
+ __le16 request_queue_id;
+ __le16 host_tag;
+ __le16 reply_flags;
+};
+
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL (0x8000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_NO_INFO (0x00000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_SAS (0x30000000)
+#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_DATA_MASK (0x0fffffff)
+union mpi3_reply_descriptors_union {
+ struct mpi3_default_reply_descriptor default_reply;
+ struct mpi3_address_reply_descriptor address_reply;
+ struct mpi3_success_reply_descriptor success;
+ struct mpi3_target_command_buffer_reply_descriptor target_command_buffer;
+ struct mpi3_status_reply_descriptor status;
+ __le32 words[4];
+};
+
+struct mpi3_sge_common {
+ __le64 address;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_bit_bucket {
+ __le64 reserved00;
+ __le32 length;
+ u8 reserved0c[3];
+ u8 flags;
+};
+
+struct mpi3_sge_extended_eedp {
+ u8 user_data_size;
+ u8 reserved01;
+ __le16 eedp_flags;
+ __le32 secondary_reference_tag;
+ __le16 secondary_application_tag;
+ __le16 application_tag_translation_mask;
+ __le16 reserved0c;
+ u8 extended_operation;
+ u8 flags;
+};
+
+union mpi3_sge_union {
+ struct mpi3_sge_common simple;
+ struct mpi3_sge_common chain;
+ struct mpi3_sge_common last_chain;
+ struct mpi3_sge_bit_bucket bit_bucket;
+ struct mpi3_sge_extended_eedp eedp;
+ __le32 words[4];
+};
+
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN (0x30)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED (0xf0)
+#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
+#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
+#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
+#define MPI3_SGE_FLAGS_DLAS_IOC_DDR (0x01)
+#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
+#define MPI3_SGE_EXT_OPER_EEDP (0x00)
+#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
+#define MPI3_EEDPFLAGS_INCR_SEC_REF_TAG (0x4000)
+#define MPI3_EEDPFLAGS_INCR_PRI_APP_TAG (0x2000)
+#define MPI3_EEDPFLAGS_INCR_SEC_APP_TAG (0x1000)
+#define MPI3_EEDPFLAGS_ESC_PASSTHROUGH (0x0800)
+#define MPI3_EEDPFLAGS_CHK_REF_TAG (0x0400)
+#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
+#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
+#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
+#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
+#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
+#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
+#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
+#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_NOOP (0x0000)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
+#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
+#define MPI3_EEDPFLAGS_EEDP_OP_INSERT (0x0004)
+#define MPI3_EEDPFLAGS_EEDP_OP_REPLACE (0x0006)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN (0x0007)
+#define MPI3_EEDP_UDS_512 (0x01)
+#define MPI3_EEDP_UDS_520 (0x02)
+#define MPI3_EEDP_UDS_4080 (0x03)
+#define MPI3_EEDP_UDS_4088 (0x04)
+#define MPI3_EEDP_UDS_4096 (0x05)
+#define MPI3_EEDP_UDS_4104 (0x06)
+#define MPI3_EEDP_UDS_4160 (0x07)
+struct mpi3_request_header {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 function_dependent;
+};
+
+struct mpi3_default_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+};
+
+#define MPI3_HOST_TAG_INVALID (0xffff)
+#define MPI3_FUNCTION_IOC_FACTS (0x01)
+#define MPI3_FUNCTION_IOC_INIT (0x02)
+#define MPI3_FUNCTION_PORT_ENABLE (0x03)
+#define MPI3_FUNCTION_EVENT_NOTIFICATION (0x04)
+#define MPI3_FUNCTION_EVENT_ACK (0x05)
+#define MPI3_FUNCTION_CI_DOWNLOAD (0x06)
+#define MPI3_FUNCTION_CI_UPLOAD (0x07)
+#define MPI3_FUNCTION_IO_UNIT_CONTROL (0x08)
+#define MPI3_FUNCTION_PERSISTENT_EVENT_LOG (0x09)
+#define MPI3_FUNCTION_MGMT_PASSTHROUGH (0x0a)
+#define MPI3_FUNCTION_CONFIG (0x10)
+#define MPI3_FUNCTION_SCSI_IO (0x20)
+#define MPI3_FUNCTION_SCSI_TASK_MGMT (0x21)
+#define MPI3_FUNCTION_SMP_PASSTHROUGH (0x22)
+#define MPI3_FUNCTION_NVME_ENCAPSULATED (0x24)
+#define MPI3_FUNCTION_TARGET_ASSIST (0x30)
+#define MPI3_FUNCTION_TARGET_STATUS_SEND (0x31)
+#define MPI3_FUNCTION_TARGET_MODE_ABORT (0x32)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_BASE (0x33)
+#define MPI3_FUNCTION_TARGET_CMD_BUF_POST_LIST (0x34)
+#define MPI3_FUNCTION_CREATE_REQUEST_QUEUE (0x70)
+#define MPI3_FUNCTION_DELETE_REQUEST_QUEUE (0x71)
+#define MPI3_FUNCTION_CREATE_REPLY_QUEUE (0x72)
+#define MPI3_FUNCTION_DELETE_REPLY_QUEUE (0x73)
+#define MPI3_FUNCTION_TOOLBOX (0x80)
+#define MPI3_FUNCTION_DIAG_BUFFER_POST (0x81)
+#define MPI3_FUNCTION_DIAG_BUFFER_MANAGE (0x82)
+#define MPI3_FUNCTION_DIAG_BUFFER_UPLOAD (0x83)
+#define MPI3_FUNCTION_MIN_IOC_USE_ONLY (0xc0)
+#define MPI3_FUNCTION_MAX_IOC_USE_ONLY (0xef)
+#define MPI3_FUNCTION_MIN_PRODUCT_SPECIFIC (0xf0)
+#define MPI3_FUNCTION_MAX_PRODUCT_SPECIFIC (0xff)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
+#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
+#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_SUCCESS (0x0000)
+#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI3_IOCSTATUS_BUSY (0x0002)
+#define MPI3_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI3_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
+#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
+#define MPI3_IOCSTATUS_FAILURE (0x001f)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI3_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI3_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI3_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+#define MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI3_IOCSTATUS_SCSI_TM_NOT_SUPPORTED (0x0041)
+#define MPI3_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI3_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI3_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004a)
+#define MPI3_IOCSTATUS_SCSI_IOC_TERMINATED (0x004b)
+#define MPI3_IOCSTATUS_SCSI_EXT_TERMINATED (0x004c)
+#define MPI3_IOCSTATUS_EEDP_GUARD_ERROR (0x004d)
+#define MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004e)
+#define MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004f)
+#define MPI3_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI3_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI3_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI3_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI3_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006a)
+#define MPI3_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006d)
+#define MPI3_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006e)
+#define MPI3_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006f)
+#define MPI3_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI3_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+#define MPI3_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI3_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+#define MPI3_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00a0)
+#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
+#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
+#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
+#define MPI3_IOCSTATUS_CI_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
+#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)
+#define MPI3_IOCSTATUS_INVALID_REPLY_QUEUE_ID (0x0f03)
+#define MPI3_IOCSTATUS_INVALID_QUEUE_DELETION (0x0f04)
+#define MPI3_IOCLOGINFO_TYPE_MASK (0xf0000000)
+#define MPI3_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#endif
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
new file mode 100644
index 000000000000..6f5dc9e78553
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -0,0 +1,901 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3MR_H_INCLUDED
+#define MPI3MR_H_INCLUDED
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include "mpi/mpi30_transport.h"
+#include "mpi/mpi30_cnfg.h"
+#include "mpi/mpi30_image.h"
+#include "mpi/mpi30_init.h"
+#include "mpi/mpi30_ioc.h"
+#include "mpi/mpi30_sas.h"
+#include "mpi3mr_debug.h"
+
+/* Global list and lock for storing multiple adapters managed by the driver */
+extern spinlock_t mrioc_list_lock;
+extern struct list_head mrioc_list;
+extern int prot_mask;
+
+#define MPI3MR_DRIVER_VERSION "00.255.45.01"
+#define MPI3MR_DRIVER_RELDATE "12-December-2020"
+
+#define MPI3MR_DRIVER_NAME "mpi3mr"
+#define MPI3MR_DRIVER_LICENSE "GPL"
+#define MPI3MR_DRIVER_AUTHOR "Broadcom Inc. <mpi3mr-linuxdrv.pdl@broadcom.com>"
+#define MPI3MR_DRIVER_DESC "MPI3 Storage Controller Device Driver"
+
+#define MPI3MR_NAME_LENGTH 32
+#define IOCNAME "%s: "
+
+/* Definitions for internal SGL and Chain SGL buffers */
+#define MPI3MR_PAGE_SIZE_4K 4096
+#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
+
+/* Definitions for MAX values for shost */
+#define MPI3MR_MAX_CMDS_LUN 7
+#define MPI3MR_MAX_CDB_LENGTH 32
+
+/* Admin queue management definitions */
+#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
+#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
+
+/* Operational queue management definitions */
+#define MPI3MR_OP_REQ_Q_QD 512
+#define MPI3MR_OP_REP_Q_QD 4096
+#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
+#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
+#define MPI3MR_MAX_SEG_LIST_SIZE 4096
+
+/* Reserved Host Tag definitions */
+#define MPI3MR_HOSTTAG_INVALID 0xFFFF
+#define MPI3MR_HOSTTAG_INITCMDS 1
+#define MPI3MR_HOSTTAG_IOCTLCMDS 2
+#define MPI3MR_HOSTTAG_BLK_TMS 5
+
+#define MPI3MR_NUM_DEVRMCMD 1
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
+#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
+ MPI3MR_NUM_DEVRMCMD - 1)
+
+#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+
+/* Reduced resource count definition for crash kernel */
+#define MPI3MR_HOST_IOS_KDUMP 128
+
+/* command/controller interaction timeout definitions in seconds */
+#define MPI3MR_INTADMCMD_TIMEOUT 10
+#define MPI3MR_PORTENABLE_TIMEOUT 300
+#define MPI3MR_ABORTTM_TIMEOUT 30
+#define MPI3MR_RESETTM_TIMEOUT 30
+#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
+#define MPI3MR_TSUPDATE_INTERVAL 900
+#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
+#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
+
+#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milliseconds */
+
+/* Internal admin command state definitions*/
+#define MPI3MR_CMD_NOTUSED 0x8000
+#define MPI3MR_CMD_COMPLETE 0x0001
+#define MPI3MR_CMD_PENDING 0x0002
+#define MPI3MR_CMD_REPLY_VALID 0x0004
+#define MPI3MR_CMD_RESET 0x0008
+
+/* Definitions for Event replies and sense buffer allocated per controller */
+#define MPI3MR_NUM_EVT_REPLIES 64
+#define MPI3MR_SENSEBUF_SZ 256
+#define MPI3MR_SENSEBUF_FACTOR 3
+#define MPI3MR_CHAINBUF_FACTOR 3
+#define MPI3MR_CHAINBUFDIX_FACTOR 2
+
+/* Invalid target device handle */
+#define MPI3MR_INVALID_DEV_HANDLE 0xFFFF
+
+/* Controller Reset related definitions */
+#define MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT 5
+#define MPI3MR_MAX_RESET_RETRY_COUNT 3
+
+/* ResponseCode definitions */
+#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
+#define MPI3MR_RSP_TM_COMPLETE 0x00
+#define MPI3MR_RSP_INVALID_FRAME 0x02
+#define MPI3MR_RSP_TM_NOT_SUPPORTED 0x04
+#define MPI3MR_RSP_TM_FAILED 0x05
+#define MPI3MR_RSP_TM_SUCCEEDED 0x08
+#define MPI3MR_RSP_TM_INVALID_LUN 0x09
+#define MPI3MR_RSP_TM_OVERLAPPED_TAG 0x0A
+#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
+ MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
+
+#define MPI3MR_DEFAULT_MDTS (128 * 1024)
+/* Command retry count definitions */
+#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
+
+/* Default target device queue depth */
+#define MPI3MR_DEFAULT_SDEV_QD 32
+
+/* Definitions for Threaded IRQ poll*/
+#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
+
+/* Definitions for the controller security status*/
+#define MPI3MR_CTLR_SECURITY_STATUS_MASK 0x0C
+#define MPI3MR_CTLR_SECURE_DBG_STATUS_MASK 0x02
+
+#define MPI3MR_INVALID_DEVICE 0x00
+#define MPI3MR_CONFIG_SECURE_DEVICE 0x04
+#define MPI3MR_HARD_SECURE_DEVICE 0x08
+#define MPI3MR_TAMPERED_DEVICE 0x0C
+
+/* SGE Flag definition */
+#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
+ (MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
+ MPI3_SGE_FLAGS_END_OF_LIST)
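+
+/*
+ * Example (editor's sketch): a zero-length, end-of-list SGE for a
+ * request with no data transfer can be built with the helper declared
+ * later in this header as
+ *	mpi3mr_add_sg_single(sge, MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST, 0, -1);
+ */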
+
+/* MSI Index from Reply Queue Index */
+#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) ((qidx) + (offset))
+
+/* IOC State definitions */
+enum mpi3mr_iocstate {
+ MRIOC_STATE_READY = 1,
+ MRIOC_STATE_RESET,
+ MRIOC_STATE_FAULT,
+ MRIOC_STATE_BECOMING_READY,
+ MRIOC_STATE_RESET_REQUESTED,
+ MRIOC_STATE_UNRECOVERABLE,
+};
+
+/* Reset reason code definitions*/
+enum mpi3mr_reset_reason {
+ MPI3MR_RESET_FROM_BRINGUP = 1,
+ MPI3MR_RESET_FROM_FAULT_WATCH = 2,
+ MPI3MR_RESET_FROM_IOCTL = 3,
+ MPI3MR_RESET_FROM_EH_HOS = 4,
+ MPI3MR_RESET_FROM_TM_TIMEOUT = 5,
+ MPI3MR_RESET_FROM_IOCTL_TIMEOUT = 6,
+ MPI3MR_RESET_FROM_MUR_FAILURE = 7,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP = 8,
+ MPI3MR_RESET_FROM_CIACTIV_FAULT = 9,
+ MPI3MR_RESET_FROM_PE_TIMEOUT = 10,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT = 11,
+ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT = 12,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT = 13,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT = 14,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT = 15,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT = 16,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT = 17,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT = 18,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT = 19,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER = 20,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
+ MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
+ MPI3MR_RESET_FROM_SYSFS = 23,
+ MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24
+};
+
+/**
+ * struct mpi3mr_compimg_ver - replica of component image
+ * version defined in mpi30_image.h in host endianness
+ *
+ */
+struct mpi3mr_compimg_ver {
+ u16 build_num;
+ u16 cust_id;
+ u8 ph_minor;
+ u8 ph_major;
+ u8 gen_minor;
+ u8 gen_major;
+};
+
+/**
+ * struct mpi3mr_ioc_facts - replica of the IOC facts data defined
+ * in mpi30_ioc.h in host endianness
+ *
+ */
+struct mpi3mr_ioc_facts {
+ u32 ioc_capabilities;
+ struct mpi3mr_compimg_ver fw_ver;
+ u32 mpi_version;
+ u16 max_reqs;
+ u16 product_id;
+ u16 op_req_sz;
+ u16 reply_sz;
+ u16 exceptions;
+ u16 max_perids;
+ u16 max_pds;
+ u16 max_sasexpanders;
+ u16 max_sasinitiators;
+ u16 max_enclosures;
+ u16 max_pcie_switches;
+ u16 max_nvme;
+ u16 max_vds;
+ u16 max_hpds;
+ u16 max_advhpds;
+ u16 max_raidpds;
+ u16 min_devhandle;
+ u16 max_devhandle;
+ u16 max_op_req_q;
+ u16 max_op_reply_q;
+ u16 shutdown_timeout;
+ u8 ioc_num;
+ u8 who_init;
+ u16 max_msix_vectors;
+ u8 personality;
+ u8 dma_mask;
+ u8 protocol_flags;
+ u8 sge_mod_mask;
+ u8 sge_mod_value;
+ u8 sge_mod_shift;
+};
+
+/**
+ * struct segments - memory descriptor structure to store
+ * virtual and dma addresses for operational queue segments.
+ *
+ * @segment: virtual address
+ * @segment_dma: dma address
+ */
+struct segments {
+ void *segment;
+ dma_addr_t segment_dma;
+};
+
+/**
+ * struct op_req_qinfo - Operational Request Queue Information
+ *
+ * @ci: consumer index
+ * @pi: producer index
+ * @num_requests: Maximum number of entries in the queue
+ * @qid: Queue Id starting from 1
+ * @reply_qid: Associated reply queue Id
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_lock: Concurrent queue access lock
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ */
+struct op_req_qinfo {
+ u16 ci;
+ u16 pi;
+ u16 num_requests;
+ u16 qid;
+ u16 reply_qid;
+ u16 num_segments;
+ u16 segment_qd;
+ spinlock_t q_lock;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+};
+
+/**
+ * struct op_reply_qinfo - Operational Reply Queue Information
+ *
+ * @ci: consumer index
+ * @qid: Queue Id starting from 1
+ * @num_replies: Maximum number of entries in the queue
+ * @num_segments: Number of discontiguous memory segments
+ * @segment_qd: Depth of each segment
+ * @q_segments: Segment descriptor pointer
+ * @q_segment_list: Segment list base virtual address
+ * @q_segment_list_dma: Segment list base DMA address
+ * @ephase: Expected phase identifier for the reply queue
+ * @pend_ios: Number of IOs pending in HW for this queue
+ * @enable_irq_poll: Flag to indicate polling is enabled
+ * @in_use: Queue is handled by poll/ISR
+ */
+struct op_reply_qinfo {
+ u16 ci;
+ u16 qid;
+ u16 num_replies;
+ u16 num_segments;
+ u16 segment_qd;
+ struct segments *q_segments;
+ void *q_segment_list;
+ dma_addr_t q_segment_list_dma;
+ u8 ephase;
+ atomic_t pend_ios;
+ bool enable_irq_poll;
+ atomic_t in_use;
+};
+
+/**
+ * struct mpi3mr_intr_info - Interrupt cookie information
+ *
+ * @mrioc: Adapter instance reference
+ * @msix_index: MSIx index
+ * @op_reply_q: Associated operational reply queue
+ * @name: Dev name for the irq claiming device
+ */
+struct mpi3mr_intr_info {
+ struct mpi3mr_ioc *mrioc;
+ u16 msix_index;
+ struct op_reply_qinfo *op_reply_q;
+ char name[MPI3MR_NAME_LENGTH];
+};
+
+/**
+ * struct tgt_dev_sas_sata - SAS/SATA device specific
+ * information cached from firmware given data
+ *
+ * @sas_address: World wide unique SAS address
+ * @dev_info: Device information bits
+ */
+struct tgt_dev_sas_sata {
+ u64 sas_address;
+ u16 dev_info;
+};
+
+/**
+ * struct tgt_dev_pcie - PCIe device specific information cached
+ * from firmware given data
+ *
+ * @mdts: Maximum data transfer size
+ * @capb: Device capabilities
+ * @pgsz: Device page size
+ * @abort_to: Timeout for abort TM
+ * @reset_to: Timeout for Target/LUN reset TM
+ */
+struct tgt_dev_pcie {
+ u32 mdts;
+ u16 capb;
+ u8 pgsz;
+ u8 abort_to;
+ u8 reset_to;
+};
+
+/**
+ * struct tgt_dev_volume - virtual device specific information
+ * cached from firmware given data
+ *
+ * @state: State of the VD
+ */
+struct tgt_dev_volume {
+ u8 state;
+};
+
+/**
+ * union _form_spec_inf - union of device specific information
+ */
+union _form_spec_inf {
+ struct tgt_dev_sas_sata sas_sata_inf;
+ struct tgt_dev_pcie pcie_inf;
+ struct tgt_dev_volume vol_inf;
+};
+
+
+
+/**
+ * struct mpi3mr_tgt_dev - target device data structure
+ *
+ * @list: List pointer
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @parent_handle: FW parent device handle
+ * @slot: Slot number
+ * @encl_handle: FW enclosure handle
+ * @perst_id: FW assigned Persistent ID
+ * @dev_type: SAS/SATA/PCIE device type
+ * @is_hidden: Whether the device should be hidden from upper layers
+ * @host_exposed: Already exposed to host or not
+ * @q_depth: Device specific Queue Depth
+ * @wwid: World wide ID
+ * @dev_spec: Device type specific information
+ * @ref_count: Reference count
+ */
+struct mpi3mr_tgt_dev {
+ struct list_head list;
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 parent_handle;
+ u16 slot;
+ u16 encl_handle;
+ u16 perst_id;
+ u8 dev_type;
+ u8 is_hidden;
+ u8 host_exposed;
+ u16 q_depth;
+ u64 wwid;
+ union _form_spec_inf dev_spec;
+ struct kref ref_count;
+};
+
+/**
+ * mpi3mr_tgtdev_get - k reference incrementor
+ * @s: Target device reference
+ *
+ * Increment target device reference count.
+ */
+static inline void mpi3mr_tgtdev_get(struct mpi3mr_tgt_dev *s)
+{
+ kref_get(&s->ref_count);
+}
+
+/**
+ * mpi3mr_free_tgtdev - target device memory deallocator
+ * @r: k reference pointer of the target device
+ *
+ * Free target device memory when no reference.
+ */
+static inline void mpi3mr_free_tgtdev(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_tgt_dev, ref_count));
+}
+
+/**
+ * mpi3mr_tgtdev_put - k reference decrementor
+ * @s: Target device reference
+ *
+ * Decrement target device reference count.
+ */
+static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
+{
+ kref_put(&s->ref_count, mpi3mr_free_tgtdev);
+}
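+
+/*
+ * Typical usage (editor's sketch; names other than the helpers above and
+ * the ioc fields are illustrative): a lookup takes a reference under
+ * tgtdev_lock and the caller drops it when done, e.g.
+ *
+ *	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ *	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ *		if (tgtdev->dev_handle == handle) {
+ *			mpi3mr_tgtdev_get(tgtdev);
+ *			break;
+ *		}
+ *	}
+ *	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ *	...
+ *	mpi3mr_tgtdev_put(tgtdev);
+ */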
+
+
+/**
+ * struct mpi3mr_stgt_priv_data - SCSI target private structure
+ *
+ * @starget: Scsi_target pointer
+ * @dev_handle: FW device handle
+ * @perst_id: FW assigned Persistent ID
+ * @num_luns: Number of Logical Units
+ * @block_io: I/O blocked to the device or not
+ * @dev_removed: Device removed in the Firmware
+ * @dev_removedelay: Device is waiting to be removed in FW
+ * @dev_type: Device type
+ * @tgt_dev: Internal target device pointer
+ */
+struct mpi3mr_stgt_priv_data {
+ struct scsi_target *starget;
+ u16 dev_handle;
+ u16 perst_id;
+ u32 num_luns;
+ atomic_t block_io;
+ u8 dev_removed;
+ u8 dev_removedelay;
+ u8 dev_type;
+ struct mpi3mr_tgt_dev *tgt_dev;
+};
+
+/**
+ * struct mpi3mr_sdev_priv_data - SCSI device private structure
+ *
+ * @tgt_priv_data: Scsi_target private data pointer
+ * @lun_id: LUN ID of the device
+ * @ncq_prio_enable: NCQ priority enable for SATA device
+ */
+struct mpi3mr_sdev_priv_data {
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ u32 lun_id;
+ u8 ncq_prio_enable;
+};
+
+/**
+ * struct mpi3mr_drv_cmd - Internal command tracker
+ *
+ * @mutex: Command mutex
+ * @done: Completion used for wakeup
+ * @reply: Firmware reply for internal commands
+ * @sensebuf: Sensebuf for SCSI IO commands
+ * @iou_rc: IO Unit control reason code
+ * @state: Command State
+ * @dev_handle: Firmware handle for device specific commands
+ * @ioc_status: IOC status from the firmware
+ * @ioc_loginfo: IOC log info from the firmware
+ * @is_waiting: Is the command issued in block mode
+ * @retry_count: Retry count for retriable commands
+ * @host_tag: Host tag used by the command
+ * @callback: Callback for non blocking commands
+ */
+struct mpi3mr_drv_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ u8 *sensebuf;
+ u8 iou_rc;
+ u16 state;
+ u16 dev_handle;
+ u16 ioc_status;
+ u32 ioc_loginfo;
+ u8 is_waiting;
+ u8 retry_count;
+ u16 host_tag;
+
+ void (*callback)(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+};
+
+
+/**
+ * struct chain_element - memory descriptor structure to store
+ * virtual and dma addresses for chain elements.
+ *
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct chain_element {
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct scmd_priv - SCSI command private data
+ *
+ * @host_tag: Host tag specific to operational queue
+ * @in_lld_scope: Command in LLD scope or not
+ * @meta_sg_valid: DIX command with meta data SGL or not
+ * @scmd: SCSI Command pointer
+ * @req_q_idx: Operational request queue index
+ * @chain_idx: Chain frame index
+ * @meta_chain_idx: Chain frame index of meta data SGL
+ * @mpi3mr_scsiio_req: MPI SCSI IO request
+ */
+struct scmd_priv {
+ u16 host_tag;
+ u8 in_lld_scope;
+ u8 meta_sg_valid;
+ struct scsi_cmnd *scmd;
+ u16 req_q_idx;
+ int chain_idx;
+ int meta_chain_idx;
+ u8 mpi3mr_scsiio_req[MPI3MR_ADMIN_REQ_FRAME_SZ];
+};
+
+/**
+ * struct mpi3mr_ioc - Adapter anchor structure stored in shost
+ * private data
+ *
+ * @list: List pointer
+ * @pdev: PCI device pointer
+ * @shost: Scsi_Host pointer
+ * @id: Controller ID
+ * @cpu_count: Number of online CPUs
+ * @enable_segqueue: Flag to indicate whether segmented queues are enabled
+ * @irqpoll_sleep: usleep unit used in threaded ISR irqpoll
+ * @name: Controller ASCII name
+ * @driver_name: Driver ASCII name
+ * @sysif_regs: System interface registers virtual address
+ * @sysif_regs_phys: System interface registers physical address
+ * @bars: PCI BARS
+ * @dma_mask: DMA mask
+ * @msix_count: Number of MSIX vectors used
+ * @intr_enabled: Flag to indicate whether interrupts are enabled
+ * @num_admin_req: Number of admin requests
+ * @admin_req_q_sz: Admin request queue size
+ * @admin_req_pi: Admin request queue producer index
+ * @admin_req_ci: Admin request queue consumer index
+ * @admin_req_base: Admin request queue base virtual address
+ * @admin_req_dma: Admin request queue base dma address
+ * @admin_req_lock: Admin queue access lock
+ * @num_admin_replies: Number of admin replies
+ * @admin_reply_q_sz: Admin reply queue size
+ * @admin_reply_ci: Admin reply queue consumer index
+ * @admin_reply_ephase: Admin reply queue expected phase
+ * @admin_reply_base: Admin reply queue base virtual address
+ * @admin_reply_dma: Admin reply queue base dma address
+ * @ready_timeout: Controller ready timeout
+ * @intr_info: Interrupt cookie pointer
+ * @intr_info_count: Number of interrupt cookies
+ * @num_queues: Number of operational queues
+ * @num_op_req_q: Number of operational request queues
+ * @req_qinfo: Operational request queue info pointer
+ * @num_op_reply_q: Number of operational reply queues
+ * @op_reply_qinfo: Operational reply queue info pointer
+ * @init_cmds: Command tracker for initialization commands
+ * @facts: Cached IOC facts data
+ * @op_reply_desc_sz: Operational reply descriptor size
+ * @num_reply_bufs: Number of reply buffers allocated
+ * @reply_buf_pool: Reply buffer pool
+ * @reply_buf: Reply buffer base virtual address
+ * @reply_buf_dma: Reply buffer DMA address
+ * @reply_buf_dma_max_address: Reply DMA address max limit
+ * @reply_free_qsz: Reply free queue size
+ * @reply_free_q_pool: Reply free queue pool
+ * @reply_free_q: Reply free queue base virtual address
+ * @reply_free_q_dma: Reply free queue base DMA address
+ * @reply_free_queue_lock: Reply free queue lock
+ * @reply_free_queue_host_index: Reply free queue host index
+ * @num_sense_bufs: Number of sense buffers
+ * @sense_buf_pool: Sense buffer pool
+ * @sense_buf: Sense buffer base virtual address
+ * @sense_buf_dma: Sense buffer base DMA address
+ * @sense_buf_q_sz: Sense buffer queue size
+ * @sense_buf_q_pool: Sense buffer queue pool
+ * @sense_buf_q: Sense buffer queue virtual address
+ * @sense_buf_q_dma: Sense buffer queue DMA address
+ * @sbq_lock: Sense buffer queue lock
+ * @sbq_host_index: Sense buffer queue host index
+ * @event_masks: Event mask bitmap
+ * @fwevt_worker_name: Firmware event worker thread name
+ * @fwevt_worker_thread: Firmware event worker thread
+ * @fwevt_lock: Firmware event lock
+ * @fwevt_list: Firmware event list
+ * @watchdog_work_q_name: Fault watchdog worker thread name
+ * @watchdog_work_q: Fault watchdog worker thread
+ * @watchdog_work: Fault watchdog work
+ * @watchdog_lock: Fault watchdog lock
+ * @is_driver_loading: Is driver still loading
+ * @scan_started: Async scan started
+ * @scan_failed: Async scan failed
+ * @stop_drv_processing: Stop all command processing
+ * @max_host_ios: Maximum host I/O count
+ * @tgtdev_lock: Lock to protect the target device list
+ * @tgtdev_list: List of target device structures
+ * @chain_buf_count: Chain buffer count
+ * @chain_buf_pool: Chain buffer pool
+ * @chain_sgl_list: Chain SGL list
+ * @chain_bitmap_sz: Chain buffer allocator bitmap size
+ * @chain_bitmap: Chain buffer allocator bitmap
+ * @chain_buf_lock: Chain buffer list lock
+ * @host_tm_cmds: Command tracker for task management commands
+ * @dev_rmhs_cmds: Command tracker for device removal commands
+ * @devrem_bitmap_sz: Device removal bitmap size
+ * @devrem_bitmap: Device removal bitmap
+ * @dev_handle_bitmap_sz: Device handle bitmap size
+ * @removepend_bitmap: Remove pending bitmap
+ * @delayed_rmhs_list: Delayed device removal list
+ * @ts_update_counter: Timestamp update counter
+ * @fault_dbg: Fault debug flag
+ * @reset_in_progress: Reset in progress flag
+ * @unrecoverable: Controller unrecoverable flag
+ * @reset_mutex: Controller reset mutex
+ * @reset_waitq: Controller reset wait queue
+ * @diagsave_timeout: Diagnostic information save timeout
+ * @logging_level: Controller debug logging level
+ * @flush_io_count: I/O count to flush after reset
+ * @current_event: Firmware event currently in process
+ * @driver_info: Driver, Kernel, OS information to firmware
+ * @change_count: Topology change count
+ * @op_reply_q_offset: Operational reply queue offset with MSIx
+ */
+struct mpi3mr_ioc {
+ struct list_head list;
+ struct pci_dev *pdev;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ bool enable_segqueue;
+ u32 irqpoll_sleep;
+
+ char name[MPI3MR_NAME_LENGTH];
+ char driver_name[MPI3MR_NAME_LENGTH];
+
+ volatile struct mpi3_sysif_registers __iomem *sysif_regs;
+ resource_size_t sysif_regs_phys;
+ int bars;
+ u64 dma_mask;
+
+ u16 msix_count;
+ u8 intr_enabled;
+
+ u16 num_admin_req;
+ u32 admin_req_q_sz;
+ u16 admin_req_pi;
+ u16 admin_req_ci;
+ void *admin_req_base;
+ dma_addr_t admin_req_dma;
+ spinlock_t admin_req_lock;
+
+ u16 num_admin_replies;
+ u32 admin_reply_q_sz;
+ u16 admin_reply_ci;
+ u8 admin_reply_ephase;
+ void *admin_reply_base;
+ dma_addr_t admin_reply_dma;
+
+ u32 ready_timeout;
+
+ struct mpi3mr_intr_info *intr_info;
+ u16 intr_info_count;
+
+ u16 num_queues;
+ u16 num_op_req_q;
+ struct op_req_qinfo *req_qinfo;
+
+ u16 num_op_reply_q;
+ struct op_reply_qinfo *op_reply_qinfo;
+
+ struct mpi3mr_drv_cmd init_cmds;
+ struct mpi3mr_ioc_facts facts;
+ u16 op_reply_desc_sz;
+
+ u32 num_reply_bufs;
+ struct dma_pool *reply_buf_pool;
+ u8 *reply_buf;
+ dma_addr_t reply_buf_dma;
+ dma_addr_t reply_buf_dma_max_address;
+
+ u16 reply_free_qsz;
+ struct dma_pool *reply_free_q_pool;
+ __le64 *reply_free_q;
+ dma_addr_t reply_free_q_dma;
+ spinlock_t reply_free_queue_lock;
+ u32 reply_free_queue_host_index;
+
+ u32 num_sense_bufs;
+ struct dma_pool *sense_buf_pool;
+ u8 *sense_buf;
+ dma_addr_t sense_buf_dma;
+
+ u16 sense_buf_q_sz;
+ struct dma_pool *sense_buf_q_pool;
+ __le64 *sense_buf_q;
+ dma_addr_t sense_buf_q_dma;
+ spinlock_t sbq_lock;
+ u32 sbq_host_index;
+ u32 event_masks[MPI3_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ char fwevt_worker_name[MPI3MR_NAME_LENGTH];
+ struct workqueue_struct *fwevt_worker_thread;
+ spinlock_t fwevt_lock;
+ struct list_head fwevt_list;
+
+ char watchdog_work_q_name[20];
+ struct workqueue_struct *watchdog_work_q;
+ struct delayed_work watchdog_work;
+ spinlock_t watchdog_lock;
+
+ u8 is_driver_loading;
+ u8 scan_started;
+ u16 scan_failed;
+ u8 stop_drv_processing;
+
+ u16 max_host_ios;
+ spinlock_t tgtdev_lock;
+ struct list_head tgtdev_list;
+
+ u32 chain_buf_count;
+ struct dma_pool *chain_buf_pool;
+ struct chain_element *chain_sgl_list;
+ u16 chain_bitmap_sz;
+ void *chain_bitmap;
+ spinlock_t chain_buf_lock;
+
+ struct mpi3mr_drv_cmd host_tm_cmds;
+ struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ u16 devrem_bitmap_sz;
+ void *devrem_bitmap;
+ u16 dev_handle_bitmap_sz;
+ void *removepend_bitmap;
+ struct list_head delayed_rmhs_list;
+
+ u32 ts_update_counter;
+ u8 fault_dbg;
+ u8 reset_in_progress;
+ u8 unrecoverable;
+ struct mutex reset_mutex;
+ wait_queue_head_t reset_waitq;
+
+ u16 diagsave_timeout;
+ int logging_level;
+ u16 flush_io_count;
+
+ struct mpi3mr_fwevt *current_event;
+ struct mpi3_driver_info_layout driver_info;
+ u16 change_count;
+ u16 op_reply_q_offset;
+};
+
+/**
+ * struct mpi3mr_fwevt - Firmware event structure.
+ *
+ * @list: list head
+ * @work: Work structure
+ * @mrioc: Adapter instance reference
+ * @event_id: MPI3 firmware event ID
+ * @send_ack: Event acknowledgment required or not
+ * @process_evt: Bottomhalf processing required or not
+ * @evt_ctx: Event context to send in Ack
+ * @ref_count: kref count
+ * @event_data: Actual MPI3 event data
+ */
+struct mpi3mr_fwevt {
+ struct list_head list;
+ struct work_struct work;
+ struct mpi3mr_ioc *mrioc;
+ u16 event_id;
+ bool send_ack;
+ bool process_evt;
+ u32 evt_ctx;
+ struct kref ref_count;
+ char event_data[] __aligned(4);
+};
+
+
+/**
+ * struct delayed_dev_rmhs_node - Delayed device removal node
+ *
+ * @list: list head
+ * @handle: Device handle
+ * @iou_rc: IO Unit Control Reason Code
+ */
+struct delayed_dev_rmhs_node {
+ struct list_head list;
+ u16 handle;
+ u8 iou_rc;
+};
+
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init);
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset);
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *opreqq, u8 *req);
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr);
+void mpi3mr_build_zero_len_sge(void *paddr);
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr);
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma);
+
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply);
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc,
+ u64 *reply_dma, u16 qidx);
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc);
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
+
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump);
+int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason);
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
+
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
+int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx);
+
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc);
+
+#endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
new file mode 100644
index 000000000000..c085bb048d41
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#ifndef MPI3SAS_DEBUG_H_INCLUDED
+
+#define MPI3SAS_DEBUG_H_INCLUDED
+
+/*
+ * debug levels
+ */
+#define MPI3_DEBUG 0x00000001
+#define MPI3_DEBUG_MSG_FRAME 0x00000002
+#define MPI3_DEBUG_SG 0x00000004
+#define MPI3_DEBUG_EVENTS 0x00000008
+#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000010
+#define MPI3_DEBUG_INIT 0x00000020
+#define MPI3_DEBUG_EXIT 0x00000040
+#define MPI3_DEBUG_FAIL 0x00000080
+#define MPI3_DEBUG_TM 0x00000100
+#define MPI3_DEBUG_REPLY 0x00000200
+#define MPI3_DEBUG_HANDSHAKE 0x00000400
+#define MPI3_DEBUG_CONFIG 0x00000800
+#define MPI3_DEBUG_DL 0x00001000
+#define MPI3_DEBUG_RESET 0x00002000
+#define MPI3_DEBUG_SCSI 0x00004000
+#define MPI3_DEBUG_IOCTL 0x00008000
+#define MPI3_DEBUG_CSMISAS 0x00010000
+#define MPI3_DEBUG_SAS 0x00020000
+#define MPI3_DEBUG_TRANSPORT 0x00040000
+#define MPI3_DEBUG_TASK_SET_FULL 0x00080000
+#define MPI3_DEBUG_TRIGGER_DIAG 0x00200000
+
+
+/*
+ * debug macros
+ */
+
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+
+
+#define dbgprint(IOC, FMT, ...) \
+ do { \
+ if (IOC->logging_level & MPI3_DEBUG) \
+ pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
+ } while (0)
+
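+/*
+ * Example: dbgprint(mrioc, "issuing port enable\n") prints only when
+ * MPI3_DEBUG is set in the controller's logging_level.
+ */
+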
+#endif /* MPI3SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
new file mode 100644
index 000000000000..9eceafca59bc
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -0,0 +1,3958 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/io-64-nonatomic-lo-hi.h>
+
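+/*
+ * 64-bit MMIO write helper: use native writeq() where available,
+ * otherwise fall back to two 32-bit writes (low dword first). Note the
+ * fallback is not a single atomic bus transaction.
+ */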
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ writeq(b, addr);
+}
+#else
+static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
+{
+ __u64 data_out = b;
+
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+}
+#endif
+
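+/*
+ * A request queue is treated as full when advancing the producer index
+ * by one would make it equal the consumer index, i.e. one slot is always
+ * left unused so that full and empty can be distinguished (for example,
+ * with num_requests = 4, pi = 3 and ci = 0 means full).
+ */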
+static inline bool
+mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
+{
+ u16 pi, ci, max_entries;
+ bool is_qfull = false;
+
+ pi = op_req_q->pi;
+ ci = READ_ONCE(op_req_q->ci);
+ max_entries = op_req_q->num_requests;
+
+ if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
+ is_qfull = true;
+
+ return is_qfull;
+}
+
+static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
+{
+ u16 i, max_vectors;
+
+ max_vectors = mrioc->intr_info_count;
+
+ for (i = 0; i < max_vectors; i++)
+ synchronize_irq(pci_irq_vector(mrioc->pdev, i));
+}
+
+void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 0;
+ mpi3mr_sync_irqs(mrioc);
+}
+
+void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
+{
+ mrioc->intr_enabled = 1;
+}
+
+static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ if (!mrioc->intr_info)
+ return;
+
+ for (i = 0; i < mrioc->intr_info_count; i++)
+ free_irq(pci_irq_vector(mrioc->pdev, i),
+ (mrioc->intr_info + i));
+
+ kfree(mrioc->intr_info);
+ mrioc->intr_info = NULL;
+ mrioc->intr_info_count = 0;
+ pci_free_irq_vectors(mrioc->pdev);
+}
+
+void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
+ dma_addr_t dma_addr)
+{
+ struct mpi3_sge_common *sgel = paddr;
+
+ sgel->flags = flags;
+ sgel->length = cpu_to_le32(length);
+ sgel->address = cpu_to_le64(dma_addr);
+}
+
+void mpi3mr_build_zero_len_sge(void *paddr)
+{
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
+}
+
+void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ if ((phys_addr < mrioc->reply_buf_dma) ||
+ (phys_addr > mrioc->reply_buf_dma_max_address))
+ return NULL;
+
+ return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
+}
+
+void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
+ dma_addr_t phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+
+ return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
+}
+
+static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
+ u64 reply_dma)
+{
+ u32 old_idx = 0;
+
+ spin_lock(&mrioc->reply_free_queue_lock);
+ old_idx = mrioc->reply_free_queue_host_index;
+ mrioc->reply_free_queue_host_index = (
+ (mrioc->reply_free_queue_host_index ==
+ (mrioc->reply_free_qsz - 1)) ? 0 :
+ (mrioc->reply_free_queue_host_index + 1));
+ mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+ spin_unlock(&mrioc->reply_free_queue_lock);
+}
+
+void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
+ u64 sense_buf_dma)
+{
+ u32 old_idx = 0;
+
+ spin_lock(&mrioc->sbq_lock);
+ old_idx = mrioc->sbq_host_index;
+ mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
+ (mrioc->sense_buf_q_sz - 1)) ? 0 :
+ (mrioc->sbq_host_index + 1));
+ mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+ spin_unlock(&mrioc->sbq_lock);
+}
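+
+/*
+ * Both reposting helpers above refill simple circular free queues of DMA
+ * addresses owned by the firmware; writing the updated host index
+ * register tells the controller how far the host has refilled the queue.
+ */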
+
+static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ event = event_reply->event;
+
+ switch (event) {
+ case MPI3_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI3_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI3_EVENT_GPIO_INTERRUPT:
+ desc = "GPIO Interrupt";
+ break;
+ case MPI3_EVENT_TEMP_THRESHOLD:
+ desc = "Temperature Threshold";
+ break;
+ case MPI3_EVENT_CABLE_MGMT:
+ desc = "Cable Management";
+ break;
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ desc = "Energy Pack Change";
+ break;
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ struct mpi3_device_page0 *event_data =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
+ event_data->dev_handle, event_data->device_form);
+ return;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ struct mpi3_event_data_device_status_change *event_data =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+ ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
+ event_data->dev_handle, event_data->reason_code);
+ return;
+ }
+ case MPI3_EVENT_SAS_DISCOVERY:
+ {
+ struct mpi3_event_data_sas_discovery *event_data =
+ (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
+ ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
+ (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop",
+ le32_to_cpu(event_data->discovery_status));
+ return;
+ }
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
+ desc = "SAS Notify Primitive";
+ break;
+ case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "Enclosure Device Status Change";
+ break;
+ case MPI3_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI3_EVENT_SAS_PHY_COUNTER:
+ desc = "SAS PHY Counter";
+ break;
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ desc = "SAS Device Discovery Error";
+ break;
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ desc = "PCIE Topology Change List";
+ break;
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ {
+ struct mpi3_event_data_pcie_enumeration *event_data =
+ (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
+ ioc_info(mrioc, "PCIE Enumeration: (%s)",
+ (event_data->reason_code ==
+ MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
+ if (event_data->enumeration_status)
+ ioc_info(mrioc, "enumeration_status(0x%08x)\n",
+ le32_to_cpu(event_data->enumeration_status));
+ return;
+ }
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ desc = "Prepare For Reset";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ ioc_info(mrioc, "%s\n", desc);
+}
+
+static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply *def_reply)
+{
+ struct mpi3_event_notification_reply *event_reply =
+ (struct mpi3_event_notification_reply *)def_reply;
+
+ mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
+ mpi3mr_print_event_data(mrioc, event_reply);
+ mpi3mr_os_handle_events(mrioc, event_reply);
+}
+
+static struct mpi3mr_drv_cmd *
+mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
+ struct mpi3_default_reply *def_reply)
+{
+ u16 idx;
+
+ switch (host_tag) {
+ case MPI3MR_HOSTTAG_INITCMDS:
+ return &mrioc->init_cmds;
+ case MPI3MR_HOSTTAG_BLK_TMS:
+ return &mrioc->host_tm_cmds;
+ case MPI3MR_HOSTTAG_INVALID:
+ if (def_reply && def_reply->function ==
+ MPI3_FUNCTION_EVENT_NOTIFICATION)
+ mpi3mr_handle_events(mrioc, def_reply);
+ return NULL;
+ default:
+ break;
+ }
+ if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ return &mrioc->dev_rmhs_cmds[idx];
+ }
+
+ return NULL;
+}
+
+static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc;
+ struct mpi3_address_reply_descriptor *addr_desc;
+ struct mpi3_success_reply_descriptor *success_desc;
+ struct mpi3_default_reply *def_reply = NULL;
+ struct mpi3mr_drv_cmd *cmdptr = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply;
+ u8 *sense_buf = NULL;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
+ if (!def_reply)
+ goto out;
+ host_tag = le16_to_cpu(def_reply->host_tag);
+ ioc_status = le16_to_cpu(def_reply->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
+ scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ }
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+
+ cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
+ if (cmdptr) {
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_COMPLETE;
+ cmdptr->ioc_loginfo = ioc_loginfo;
+ cmdptr->ioc_status = ioc_status;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (def_reply) {
+ cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
+ memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
+ mrioc->facts.reply_sz);
+ }
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+ }
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
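+/*
+ * Reply queues use an expected-phase scheme: the firmware toggles the
+ * phase bit in each descriptor it posts, so a descriptor whose phase
+ * does not match the host's expected phase has not been written yet.
+ * The expected phase is flipped each time the consumer index wraps.
+ */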
+static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+{
+ u32 exp_phase = mrioc->admin_reply_ephase;
+ u32 admin_reply_ci = mrioc->admin_reply_ci;
+ u32 num_admin_replies = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+
+ reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ return 0;
+
+ do {
+ mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
+ mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_admin_replies++;
+ if (++admin_reply_ci == mrioc->num_admin_replies) {
+ admin_reply_ci = 0;
+ exp_phase ^= 1;
+ }
+ reply_desc =
+ (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ admin_reply_ci;
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+ } while (1);
+
+ writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ mrioc->admin_reply_ci = admin_reply_ci;
+ mrioc->admin_reply_ephase = exp_phase;
+
+ return num_admin_replies;
+}
+
+/**
+ * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
+ * queue's consumer index from operational reply descriptor queue.
+ * @op_reply_q: op_reply_qinfo object
+ * @reply_ci: operational reply descriptor's queue consumer index
+ *
+ * Returns reply descriptor frame address
+ */
+static inline struct mpi3_default_reply_descriptor *
+mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
+{
+ void *segment_base_addr;
+ struct segments *segments = op_reply_q->q_segments;
+ struct mpi3_default_reply_descriptor *reply_desc = NULL;
+
+ segment_base_addr =
+ segments[reply_ci / op_reply_q->segment_qd].segment;
+ reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
+ (reply_ci % op_reply_q->segment_qd);
+ return reply_desc;
+}
+
+static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_intr_info *intr_info)
+{
+ struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
+ struct op_req_qinfo *op_req_q;
+ u32 exp_phase;
+ u32 reply_ci;
+ u32 num_op_reply = 0;
+ u64 reply_dma = 0;
+ struct mpi3_default_reply_descriptor *reply_desc;
+ u16 req_q_idx = 0, reply_qidx;
+
+ reply_qidx = op_reply_q->qid - 1;
+
+ if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
+ return 0;
+
+ exp_phase = op_reply_q->ephase;
+ reply_ci = op_reply_q->ci;
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&op_reply_q->in_use);
+ return 0;
+ }
+
+ do {
+ req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
+ op_req_q = &mrioc->req_qinfo[req_q_idx];
+
+ WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
+ mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
+ reply_qidx);
+ atomic_dec(&op_reply_q->pend_ios);
+ if (reply_dma)
+ mpi3mr_repost_reply_buf(mrioc, reply_dma);
+ num_op_reply++;
+
+ if (++reply_ci == op_reply_q->num_replies) {
+ reply_ci = 0;
+ exp_phase ^= 1;
+ }
+
+ reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
+
+ if ((le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
+ break;
+ /*
+ * Exit completion loop to avoid CPU lockup
+ * Ensure remaining completion happens from threaded ISR.
+ */
+ if (num_op_reply > mrioc->max_host_ios) {
+ intr_info->op_reply_q->enable_irq_poll = true;
+ break;
+ }
+
+ } while (1);
+
+ writel(reply_ci,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
+ op_reply_q->ci = reply_ci;
+ op_reply_q->ephase = exp_phase;
+
+ atomic_dec(&op_reply_q->in_use);
+ return num_op_reply;
+}
+
+static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_admin_replies = 0, num_op_reply = 0;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+
+ if (!mrioc->intr_enabled)
+ return IRQ_NONE;
+
+ midx = intr_info->msix_index;
+
+ if (!midx)
+ num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
+
+ if (num_admin_replies || num_op_reply)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static irqreturn_t mpi3mr_isr(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ int ret;
+
+ if (!intr_info)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+ /* Call primary ISR routine */
+ ret = mpi3mr_isr_primary(irq, privdata);
+
+ /*
+ * If more IOs are expected, schedule IRQ polling thread.
+ * Otherwise exit from ISR.
+ */
+ if (!intr_info->op_reply_q)
+ return ret;
+
+ if (!intr_info->op_reply_q->enable_irq_poll ||
+ !atomic_read(&intr_info->op_reply_q->pend_ios))
+ return ret;
+
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mpi3mr_isr_poll - Reply queue polling routine
+ * @irq: IRQ
+ * @privdata: Interrupt info
+ *
+ * Poll for pending I/O completions in a loop until no pending I/Os
+ * remain or until a controller queue depth worth of I/Os have been
+ * processed.
+ *
+ * Return: IRQ_NONE or IRQ_HANDLED
+ */
+static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
+{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_op_reply = 0;
+
+ if (!intr_info || !intr_info->op_reply_q)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+
+ /* Poll for pending IOs completions */
+ do {
+ if (!mrioc->intr_enabled)
+ break;
+
+ if (!midx)
+ mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply +=
+ mpi3mr_process_op_reply_q(mrioc, intr_info);
+
+ usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
+
+ } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
+ (num_op_reply < mrioc->max_host_ios));
+
+ intr_info->op_reply_q->enable_irq_poll = false;
+ enable_irq(pci_irq_vector(mrioc->pdev, midx));
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpi3mr_request_irq - Request IRQ and register ISR
+ * @mrioc: Adapter instance reference
+ * @index: IRQ vector index
+ *
+ * Request a threaded IRQ with mpi3mr_isr as the primary handler and
+ * mpi3mr_isr_poll as the threaded (secondary) handler.
+ *
+ * Return: 0 on success and non zero on failures.
+ */
+static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
+ int retval = 0;
+
+ intr_info->mrioc = mrioc;
+ intr_info->msix_index = index;
+ intr_info->op_reply_q = NULL;
+
+ snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
+ mrioc->driver_name, mrioc->id, index);
+
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
+ mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+ if (retval) {
+ ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
+ intr_info->name, pci_irq_vector(pdev, index));
+ return retval;
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_setup_isr - Setup ISR for the controller
+ * @mrioc: Adapter instance reference
+ * @setup_one: When set, request only one IRQ vector
+ *
+ * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
+ *
+ * Return: 0 on success and non zero on failures.
+ */
+static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
+{
+ unsigned int irq_flags = PCI_IRQ_MSIX;
+ int max_vectors;
+ int retval;
+ int i;
+ struct irq_affinity desc = { .pre_vectors = 1};
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (setup_one || reset_devices)
+ max_vectors = 1;
+ else {
+ max_vectors =
+ min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
+
+ ioc_info(mrioc,
+ "MSI-X vectors supported: %d, no of cores: %d,",
+ mrioc->msix_count, mrioc->cpu_count);
+ ioc_info(mrioc,
+ "MSI-x vectors requested: %d\n", max_vectors);
+ }
+
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
+ retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
+ 1, max_vectors, irq_flags, &desc);
+ if (retval < 0) {
+ ioc_err(mrioc, "Cannot alloc irq vectors\n");
+ goto out_failed;
+ }
+ if (retval != max_vectors) {
+ ioc_info(mrioc,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ retval, max_vectors);
+ /*
+ * If only one MSI-X vector is allocated, then MSI-X 0 will be shared
+ * between the admin queue and the operational queues
+ */
+ if (retval == 1)
+ mrioc->op_reply_q_offset = 0;
+
+ max_vectors = retval;
+ }
+ mrioc->intr_info = kcalloc(max_vectors, sizeof(struct mpi3mr_intr_info),
+ GFP_KERNEL);
+ if (!mrioc->intr_info) {
+ retval = -ENOMEM;
+ pci_free_irq_vectors(mrioc->pdev);
+ goto out_failed;
+ }
+ for (i = 0; i < max_vectors; i++) {
+ retval = mpi3mr_request_irq(mrioc, i);
+ if (retval) {
+ mrioc->intr_info_count = i;
+ goto out_failed;
+ }
+ }
+ mrioc->intr_info_count = max_vectors;
+ mpi3mr_ioc_enable_intr(mrioc);
+ return 0;
+
+out_failed:
+ mpi3mr_cleanup_isr(mrioc);
+
+ return retval;
+}
+
+static const struct {
+ enum mpi3mr_iocstate value;
+ char *name;
+} mrioc_states[] = {
+ { MRIOC_STATE_READY, "ready" },
+ { MRIOC_STATE_FAULT, "fault" },
+ { MRIOC_STATE_RESET, "reset" },
+ { MRIOC_STATE_BECOMING_READY, "becoming ready" },
+ { MRIOC_STATE_RESET_REQUESTED, "reset requested" },
+ { MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
+};
+
+static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
+ if (mrioc_states[i].value == mrioc_state) {
+ name = mrioc_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset reason to name mapper structure*/
+static const struct {
+ enum mpi3mr_reset_reason value;
+ char *name;
+} mpi3mr_reset_reason_codes[] = {
+ { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
+ { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
+ { MPI3MR_RESET_FROM_IOCTL, "application invocation" },
+ { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
+ { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
+ { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
+ { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
+ { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
+ { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
+ { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
+ { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
+ { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
+ { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
+ {
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
+ "create reply queue timeout"
+ },
+ {
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
+ "create request queue timeout"
+ },
+ { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
+ { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
+ { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
+ { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
+ {
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER,
+ "component image activation timeout"
+ },
+ {
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
+ "get package version timeout"
+ },
+ { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
+ { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
+};
+
+/**
+ * mpi3mr_reset_rc_name - get reset reason code name
+ * @reason_code: reset reason code value
+ *
+ * Map the reset reason code to a NULL-terminated ASCII string.
+ *
+ * Return: name corresponding to reset reason value or NULL.
+ */
+static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
+ if (mpi3mr_reset_reason_codes[i].value == reason_code) {
+ name = mpi3mr_reset_reason_codes[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/* Reset type to name mapper structure*/
+static const struct {
+ u16 reset_type;
+ char *name;
+} mpi3mr_reset_types[] = {
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
+ { MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
+};
+
+/**
+ * mpi3mr_reset_type_name - get reset type name
+ * @reset_type: reset type value
+ *
+ * Map the reset type to a NULL-terminated ASCII string.
+ *
+ * Return: name corresponding to reset type value or NULL.
+ */
+static const char *mpi3mr_reset_type_name(u16 reset_type)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
+ if (mpi3mr_reset_types[i].reset_type == reset_type) {
+ name = mpi3mr_reset_types[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+/**
+ * mpi3mr_print_fault_info - Display fault information
+ * @mrioc: Adapter instance reference
+ *
+ * Display the controller fault information if there is a
+ * controller fault.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, code, code1, code2, code3;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ code = readl(&mrioc->sysif_regs->fault);
+ code1 = readl(&mrioc->sysif_regs->fault_info[0]);
+ code2 = readl(&mrioc->sysif_regs->fault_info[1]);
+ code3 = readl(&mrioc->sysif_regs->fault_info[2]);
+
+ ioc_info(mrioc,
+ "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
+ code, code1, code2, code3);
+ }
+}
+
+/**
+ * mpi3mr_get_iocstate - Get IOC State
+ * @mrioc: Adapter instance reference
+ *
+ * Return the IOC state as an enum based on the IOC status, the IOC
+ * configuration and the unrecoverable state of the controller.
+ *
+ * Return: Current IOC state.
+ */
+enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status, ioc_config;
+ u8 ready, enabled;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->unrecoverable)
+ return MRIOC_STATE_UNRECOVERABLE;
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
+ return MRIOC_STATE_FAULT;
+
+ ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
+ enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
+
+ if (ready && enabled)
+ return MRIOC_STATE_READY;
+ if ((!ready) && (!enabled))
+ return MRIOC_STATE_RESET;
+ if ((!ready) && (enabled))
+ return MRIOC_STATE_BECOMING_READY;
+
+ return MRIOC_STATE_RESET_REQUESTED;
+}
+
+/**
+ * mpi3mr_clear_reset_history - clear reset history
+ * @mrioc: Adapter instance reference
+ *
+ * Write the reset history bit in IOC status to clear the bit,
+ * if it is already set.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_status;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
+ writel(ioc_status, &mrioc->sysif_regs->ioc_status);
+}
+
+/**
+ * mpi3mr_issue_and_process_mur - Message unit Reset handler
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ *
+ * Issue Message unit Reset to the controller and wait for it to
+ * be complete.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason)
+{
+ u32 ioc_config, timeout, ioc_status;
+ int retval = -1;
+
+ ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
+ if (mrioc->unrecoverable) {
+ ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
+ return retval;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ timeout = mrioc->ready_timeout * 10;
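+ /* the loop below sleeps 100ms per pass, so this polls for ready_timeout seconds */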
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
+ mpi3mr_clear_reset_history(mrioc);
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
+ retval = 0;
+ break;
+ }
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status, ioc_config);
+ return retval;
+}
+
+/**
+ * mpi3mr_bring_ioc_ready - Bring controller to ready state
+ * @mrioc: Adapter instance reference
+ *
+ * Set Enable IOC bit in IOC configuration register and wait for
+ * the controller to become ready.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, timeout;
+ enum mpi3mr_iocstate current_state;
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ current_state = mpi3mr_get_iocstate(mrioc);
+ if (current_state == MRIOC_STATE_READY)
+ return 0;
+ msleep(100);
+ } while (--timeout);
+
+ return -1;
+}
+
+/**
+ * mpi3mr_soft_reset_success - Check whether soft reset succeeded
+ * @ioc_status: IOC status register value
+ * @ioc_config: IOC config register value
+ *
+ * Check whether the soft reset is successful or not based on
+ * IOC status and IOC config register values.
+ *
+ * Return: True when the soft reset is successful, false otherwise.
+ */
+static inline bool
+mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
+{
+ if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ return true;
+ return false;
+}
+
+/**
+ * mpi3mr_diagfault_success - Check whether diag fault reset succeeded
+ * @mrioc: Adapter reference
+ * @ioc_status: IOC status register value
+ *
+ * Check whether the controller has hit the diag-fault-reset fault code.
+ *
+ * Return: True when there is diag fault, false otherwise.
+ */
+static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
+ u32 ioc_status)
+{
+ u32 fault;
+
+ if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
+ return false;
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
+ return true;
+ return false;
+}
+
+/**
+ * mpi3mr_set_diagsave - Set diag save bit for snapdump
+ * @mrioc: Adapter reference
+ *
+ * Set diag save bit in IOC configuration register to enable
+ * snapdump.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config;
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+}
+
+/**
+ * mpi3mr_issue_reset - Issue reset to the controller
+ * @mrioc: Adapter reference
+ * @reset_type: Reset type
+ * @reset_reason: Reset reason code
+ *
+ * Unlock the host diagnostic registers and write the specific
+ * reset type to that, wait for reset acknowledgment from the
+ * controller, if the reset is not successful retry for the
+ * predefined number of times.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
+ u32 reset_reason)
+{
+ int retval = -1;
+ u8 unlock_retry_count, reset_retry_count = 0;
+ u32 host_diagnostic, timeout, ioc_status, ioc_config;
+
+ pci_cfg_access_lock(mrioc->pdev);
+ if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
+ (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
+ goto out;
+ if (mrioc->unrecoverable)
+ goto out;
+retry_reset:
+ unlock_retry_count = 0;
+ mpi3mr_clear_reset_history(mrioc);
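+ /*
+ * The host diagnostic register is write protected; the magic key
+ * sequence below must be written to the write_sequence register to
+ * set DIAG_WRITE_ENABLE before a reset action can be requested. The
+ * unlock attempt is retried a bounded number of times before the
+ * controller is marked unrecoverable.
+ */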
+ do {
+ ioc_info(mrioc,
+ "Write magic sequence to unlock host diag register (retry=%d)\n",
+ ++unlock_retry_count);
+ if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ mrioc->unrecoverable = 1;
+ goto out;
+ }
+
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
+ &mrioc->sysif_regs->write_sequence);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
+ &mrioc->sysif_regs->write_sequence);
+ usleep_range(1000, 1100);
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ ioc_info(mrioc,
+ "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
+ unlock_retry_count, host_diagnostic);
+ } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
+
+ writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ mpi3mr_reset_rc_name(reset_reason), reset_reason);
+ writel(host_diagnostic | reset_type,
+ &mrioc->sysif_regs->host_diagnostic);
+ timeout = mrioc->ready_timeout * 10;
+ if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status &
+ MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ mpi3mr_clear_reset_history(mrioc);
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if (mpi3mr_soft_reset_success(ioc_status,
+ ioc_config)) {
+ retval = 0;
+ break;
+ }
+ }
+ msleep(100);
+ } while (--timeout);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+ mpi3mr_clear_reset_history(mrioc);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ }
+ if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n",
+ reset_retry_count, ioc_status, ioc_config);
+ goto retry_reset;
+ }
+
+out:
+ pci_cfg_access_unlock(mrioc->pdev);
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+ return retval;
+}
+
+/**
+ * mpi3mr_admin_request_post - Post request to admin queue
+ * @mrioc: Adapter reference
+ * @admin_req: MPI3 request
+ * @admin_req_sz: Request size
+ * @ignore_reset: Post the request even when a reset is in progress
+ *
+ * Post the MPI3 request into the admin request queue and
+ * inform the controller; if the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
+ u16 admin_req_sz, u8 ignore_reset)
+{
+ u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
+ int retval = 0;
+ unsigned long flags;
+ u8 *areq_entry;
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
+ return -EFAULT;
+ }
+
+ spin_lock_irqsave(&mrioc->admin_req_lock, flags);
+ areq_pi = mrioc->admin_req_pi;
+ areq_ci = mrioc->admin_req_ci;
+ max_entries = mrioc->num_admin_req;
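+ /*
+ * The queue is considered full when advancing the producer index
+ * would make it equal the consumer index; one slot is always left
+ * unused so a full queue can be told apart from an empty one.
+ */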
+ if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
+ (areq_pi == (max_entries - 1)))) {
+ ioc_err(mrioc, "AdminReqQ full condition detected\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ if (!ignore_reset && mrioc->reset_in_progress) {
+ ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+ areq_entry = (u8 *)mrioc->admin_req_base +
+ (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
+
+ if (++areq_pi == max_entries)
+ areq_pi = 0;
+ mrioc->admin_req_pi = areq_pi;
+
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+
+out:
+ spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_free_op_req_q_segments - free request memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational request queue index
+ *
+ * Free memory segments allocated for operational request queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->req_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+ if (mrioc->req_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->req_qinfo[q_idx].q_segment_list,
+ mrioc->req_qinfo[q_idx].q_segment_list_dma);
+ mrioc->req_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->req_qinfo[q_idx].num_requests *
+ mrioc->facts.op_req_sz;
+
+ for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+ kfree(mrioc->req_qinfo[q_idx].q_segments);
+ mrioc->req_qinfo[q_idx].q_segments = NULL;
+ mrioc->req_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_free_op_reply_q_segments - free reply memory segments
+ * @mrioc: Adapter instance reference
+ * @q_idx: operational reply queue index
+ *
+ * Free memory segments allocated for operational reply queue
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
+{
+ u16 j;
+ int size;
+ struct segments *segments;
+
+ segments = mrioc->op_reply_qinfo[q_idx].q_segments;
+ if (!segments)
+ return;
+
+ if (mrioc->enable_segqueue) {
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+ if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
+ dma_free_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list,
+ mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
+ mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
+ }
+ } else
+ size = mrioc->op_reply_qinfo[q_idx].segment_qd *
+ mrioc->op_reply_desc_sz;
+
+ for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
+ if (!segments[j].segment)
+ continue;
+ dma_free_coherent(&mrioc->pdev->dev,
+ size, segments[j].segment, segments[j].segment_dma);
+ segments[j].segment = NULL;
+ }
+
+ kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
+ mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
+ mrioc->op_reply_qinfo[q_idx].qid = 0;
+}
+
+/**
+ * mpi3mr_delete_op_reply_q - delete operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Delete the operational reply queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_delete_reply_queue_request delq_req;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = mrioc->op_reply_qinfo[qidx].qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (!reply_qid) {
+ retval = -1;
+ ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
+ goto out;
+ }
+
+ memset(&delq_req, 0, sizeof(delq_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
+ delq_req.queue_id = cpu_to_le16(reply_qid);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
+ 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue DelRepQ: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
+ mrioc->unrecoverable = 1;
+
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ mrioc->intr_info[midx].op_reply_q = NULL;
+
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_op_reply_q_segments - Alloc segmented reply pool
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Allocate segmented memory pools for operational reply
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
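+ /*
+ * With segmented queues enabled, the reply queue is split into fixed
+ * size DMA segments and a separate segment list (an array of segment
+ * DMA addresses) is handed to the firmware; otherwise a single
+ * contiguous buffer backs the whole queue.
+ */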
+ if (mrioc->enable_segqueue) {
+ op_reply_q->segment_qd =
+ MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
+
+ size = MPI3MR_OP_REP_Q_SEG_SIZE;
+
+ op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_reply_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
+ } else {
+ op_reply_q->segment_qd = op_reply_q->num_replies;
+ size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
+ }
+
+ op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
+ op_reply_q->segment_qd);
+
+ op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_reply_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
+ * @mrioc: Adapter instance reference
+ * @qidx: request queue index
+ *
+ * Allocate segmented memory pools for operational request
+ * queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ int i, size;
+ u64 *q_segment_list_entry = NULL;
+ struct segments *segments;
+
+ if (mrioc->enable_segqueue) {
+ op_req_q->segment_qd =
+ MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
+
+ size = MPI3MR_OP_REQ_Q_SEG_SIZE;
+
+ op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
+ GFP_KERNEL);
+ if (!op_req_q->q_segment_list)
+ return -ENOMEM;
+ q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
+
+ } else {
+ op_req_q->segment_qd = op_req_q->num_requests;
+ size = op_req_q->num_requests * mrioc->facts.op_req_sz;
+ }
+
+ op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
+ op_req_q->segment_qd);
+
+ op_req_q->q_segments = kcalloc(op_req_q->num_segments,
+ sizeof(struct segments), GFP_KERNEL);
+ if (!op_req_q->q_segments)
+ return -ENOMEM;
+
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++) {
+ segments[i].segment =
+ dma_alloc_coherent(&mrioc->pdev->dev,
+ size, &segments[i].segment_dma, GFP_KERNEL);
+ if (!segments[i].segment)
+ return -ENOMEM;
+ if (mrioc->enable_segqueue)
+ q_segment_list_entry[i] =
+ (unsigned long)segments[i].segment_dma;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_create_op_reply_q - create operational reply queue
+ * @mrioc: Adapter instance reference
+ * @qidx: operational reply queue index
+ *
+ * Create the operational reply queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct mpi3_create_reply_queue_request create_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ int retval = 0;
+ u16 reply_qid = 0, midx;
+
+ reply_qid = op_reply_q->qid;
+
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
+
+ if (reply_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
+ reply_qid);
+
+ return retval;
+ }
+
+ reply_qid = qidx + 1;
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ op_reply_q->ci = 0;
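+ /*
+ * The expected phase starts at 1: descriptors written on the first
+ * pass through a freshly created queue carry phase bit 1, and the
+ * expectation is toggled each time the consumer index wraps.
+ */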
+ op_reply_q->ephase = 1;
+ atomic_set(&op_reply_q->pend_ios, 0);
+ atomic_set(&op_reply_q->in_use, 0);
+ op_reply_q->enable_irq_poll = false;
+
+ if (!op_reply_q->q_segments) {
+ retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
+ if (retval) {
+ mpi3mr_free_op_reply_q_segments(mrioc, qidx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
+ create_req.queue_id = cpu_to_le16(reply_qid);
+ create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
+ create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
+ if (mrioc->enable_segqueue) {
+ create_req.flags |=
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_reply_q->q_segments[0].segment_dma);
+
+ create_req.size = cpu_to_le16(op_reply_q->num_replies);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "CreateRepQ: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_reply_q->qid = reply_qid;
+ mrioc->intr_info[midx].op_reply_q = op_reply_q;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_create_op_req_q - create operational request queue
+ * @mrioc: Adapter instance reference
+ * @idx: operational request queue index
+ * @reply_qid: Reply queue ID
+ *
+ * Create the operational request queue by issuing an MPI request
+ * through the admin queue.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
+ u16 reply_qid)
+{
+ struct mpi3_create_request_queue_request create_req;
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
+ int retval = 0;
+ u16 req_qid = 0;
+
+ req_qid = op_req_q->qid;
+
+ if (req_qid) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
+ req_qid);
+
+ return retval;
+ }
+ req_qid = idx + 1;
+
+ op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
+ op_req_q->ci = 0;
+ op_req_q->pi = 0;
+ op_req_q->reply_qid = reply_qid;
+ spin_lock_init(&op_req_q->q_lock);
+
+ if (!op_req_q->q_segments) {
+ retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
+ if (retval) {
+ mpi3mr_free_op_req_q_segments(mrioc, idx);
+ goto out;
+ }
+ }
+
+ memset(&create_req, 0, sizeof(create_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
+ goto out_unlock;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
+ create_req.queue_id = cpu_to_le16(req_qid);
+ if (mrioc->enable_segqueue) {
+ create_req.flags =
+ MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segment_list_dma);
+ } else
+ create_req.base_address = cpu_to_le64(
+ op_req_q->q_segments[0].segment_dma);
+ create_req.reply_queue_id = cpu_to_le16(reply_qid);
+ create_req.size = cpu_to_le16(op_req_q->num_requests);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &create_req,
+ sizeof(create_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "CreateReqQ: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ if (mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT))
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ op_req_q->qid = req_qid;
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+
+ return retval;
+}
+
+/**
+ * mpi3mr_create_op_queues - create operational queue pairs
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for operational queue metadata and call
+ * create request and reply queue functions.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u16 num_queues = 0, i = 0, msix_count_op_q = 1;
+
+ num_queues = min_t(int, mrioc->facts.max_op_reply_q,
+ mrioc->facts.max_op_req_q);
+
+ msix_count_op_q =
+ mrioc->intr_info_count - mrioc->op_reply_q_offset;
+ if (!mrioc->num_queues)
+ mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
+ num_queues = mrioc->num_queues;
+ ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
+ num_queues);
+
+ if (!mrioc->req_qinfo) {
+ mrioc->req_qinfo = kcalloc(num_queues,
+ sizeof(struct op_req_qinfo), GFP_KERNEL);
+ if (!mrioc->req_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+
+ mrioc->op_reply_qinfo = kcalloc(num_queues,
+ sizeof(struct op_reply_qinfo), GFP_KERNEL);
+ if (!mrioc->op_reply_qinfo) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
+ if (mrioc->enable_segqueue)
+ ioc_info(mrioc,
+ "allocating operational queues through segmented queues\n");
+
+ for (i = 0; i < num_queues; i++) {
+ if (mpi3mr_create_op_reply_q(mrioc, i)) {
+ ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
+ break;
+ }
+ if (mpi3mr_create_op_req_q(mrioc, i,
+ mrioc->op_reply_qinfo[i].qid)) {
+ ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
+ mpi3mr_delete_op_reply_q(mrioc, i);
+ break;
+ }
+ }
+
+ if (i == 0) {
+ /* Not even one queue was created successfully */
+ retval = -1;
+ goto out_failed;
+ }
+ mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
+ ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
+ mrioc->num_op_reply_q);
+
+ return retval;
+out_failed:
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_op_request_post - Post request to operational queue
+ * @mrioc: Adapter reference
+ * @op_req_q: Operational request queue info
+ * @req: MPI3 request
+ *
+ * Post the MPI3 request into the operational request queue and
+ * inform the controller; if the queue is full, return an
+ * appropriate error.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
+ struct op_req_qinfo *op_req_q, u8 *req)
+{
+ u16 pi = 0, max_entries, reply_qidx = 0, midx;
+ int retval = 0;
+ unsigned long flags;
+ u8 *req_entry;
+ void *segment_base_addr;
+ u16 req_sz = mrioc->facts.op_req_sz;
+ struct segments *segments = op_req_q->q_segments;
+
+ reply_qidx = op_req_q->reply_qid - 1;
+
+ if (mrioc->unrecoverable)
+ return -EFAULT;
+
+ spin_lock_irqsave(&op_req_q->q_lock, flags);
+ pi = op_req_q->pi;
+ max_entries = op_req_q->num_requests;
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
+ reply_qidx, mrioc->op_reply_q_offset);
+ mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);
+
+ if (mpi3mr_check_req_qfull(op_req_q)) {
+ retval = -EAGAIN;
+ goto out;
+ }
+ }
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "OpReqQ submit reset in progress\n");
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
+ req_entry = (u8 *)segment_base_addr +
+ ((pi % op_req_q->segment_qd) * req_sz);
+
+ memset(req_entry, 0, req_sz);
+ memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
+
+ if (++pi == max_entries)
+ pi = 0;
+ op_req_q->pi = pi;
+
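+ /*
+ * Once the number of outstanding I/Os on the paired reply queue
+ * crosses MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT, IRQ polling is armed so
+ * that further completions are drained from the threaded ISR.
+ */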
+ if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
+ > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
+ mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+
+ writel(op_req_q->pi,
+ &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
+
+out:
+ spin_unlock_irqrestore(&op_req_q->q_lock, flags);
+ return retval;
+}
+
+/**
+ * mpi3mr_sync_timestamp - Issue time stamp sync request
+ * @mrioc: Adapter reference
+ *
+ * Issue an IO unit control MPI request to synchronize the firmware
+ * timestamp with host time.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
+{
+ ktime_t current_time;
+ struct mpi3_iounit_control_request iou_ctrl;
+ int retval = 0;
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
+ current_time = ktime_get_real();
+ iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
+ sizeof(iou_ctrl), 0);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
+ mrioc->init_cmds.is_waiting = 0;
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_watchdog_work - watchdog thread to monitor faults
+ * @work: work struct
+ *
+ * Watchdog work executed periodically (at a 1 second interval) to
+ * monitor for firmware faults and to issue a periodic timestamp sync
+ * to the firmware.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_watchdog_work(struct work_struct *work)
+{
+ struct mpi3mr_ioc *mrioc =
+ container_of(work, struct mpi3mr_ioc, watchdog_work.work);
+ unsigned long flags;
+ enum mpi3mr_iocstate ioc_state;
+ u32 fault, host_diagnostic;
+
+ if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
+ mrioc->ts_update_counter = 0;
+ mpi3mr_sync_timestamp(mrioc);
+ }
+
+ /*Check for fault state every one second and issue Soft reset*/
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_FAULT) {
+ fault = readl(&mrioc->sysif_regs->fault) &
+ MPI3_SYSIF_FAULT_CODE_MASK;
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
+ if (!mrioc->diagsave_timeout) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_warn(mrioc, "Diag save in progress\n");
+ }
+ if ((mrioc->diagsave_timeout++) <=
+ MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
+ goto schedule_work;
+ } else
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->diagsave_timeout = 0;
+
+ if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) {
+ ioc_info(mrioc,
+ "Factory Reset fault occurred marking controller as unrecoverable"
+ );
+ mrioc->unrecoverable = 1;
+ goto out;
+ }
+
+ if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
+ (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
+ (mrioc->reset_in_progress))
+ goto out;
+ if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
+ else
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_FAULT_WATCH, 0);
+ }
+
+schedule_work:
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+out:
+ return;
+}
+
+/**
+ * mpi3mr_start_watchdog - Start watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Create and start the watchdog thread to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ if (mrioc->watchdog_work_q)
+ return;
+
+ INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
+ snprintf(mrioc->watchdog_work_q_name,
+ sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
+ mrioc->id);
+ mrioc->watchdog_work_q =
+ create_singlethread_workqueue(mrioc->watchdog_work_q_name);
+ if (!mrioc->watchdog_work_q) {
+ ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
+ return;
+ }
+
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+}
+
+/**
+ * mpi3mr_stop_watchdog - Stop watchdog
+ * @mrioc: Adapter instance reference
+ *
+ * Stop the watchdog thread created to monitor controller
+ * faults.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ wq = mrioc->watchdog_work_q;
+ mrioc->watchdog_work_q = NULL;
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpi3mr_kill_ioc - Kill the controller
+ * @mrioc: Adapter instance reference
+ * @reason: reason for the failure.
+ *
+ * When fault debug is enabled, display the fault information (or
+ * issue a diag fault reset if the controller is not already faulted)
+ * and then freeze or panic the system for controller debug purposes.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
+{
+ enum mpi3mr_iocstate ioc_state;
+
+ if (!mrioc->fault_dbg)
+ return;
+
+ dump_stack();
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_FAULT)
+ mpi3mr_print_fault_info(mrioc);
+ else {
+ ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
+ reason);
+ mpi3mr_diagfault_reset_handler(mrioc, reason);
+ }
+ if (mrioc->fault_dbg == 2)
+ for (;;)
+ ;
+ else
+ panic("panic in %s\n", __func__);
+}
+
+/**
+ * mpi3mr_setup_admin_qpair - Setup admin queue pair
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate memory for admin queue pair if required and register
+ * the admin queue with the controller.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 num_admin_entries = 0;
+
+ mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
+ mrioc->num_admin_req = mrioc->admin_req_q_sz /
+ MPI3MR_ADMIN_REQ_FRAME_SZ;
+ mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
+ mrioc->admin_req_base = NULL;
+
+ mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
+ mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
+ MPI3MR_ADMIN_REPLY_FRAME_SZ;
+ mrioc->admin_reply_ci = 0;
+ mrioc->admin_reply_ephase = 1;
+ mrioc->admin_reply_base = NULL;
+
+ if (!mrioc->admin_req_base) {
+ mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
+
+ if (!mrioc->admin_req_base) {
+ retval = -1;
+ goto out_failed;
+ }
+
+ mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
+ GFP_KERNEL);
+
+ if (!mrioc->admin_reply_base) {
+ retval = -1;
+ goto out_failed;
+ }
+ }
+
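+ /*
+ * The admin queue depths are programmed through a single register:
+ * reply queue entries in the upper 16 bits, request queue entries
+ * in the lower 16 bits.
+ */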
+ num_admin_entries = (mrioc->num_admin_replies << 16) |
+ (mrioc->num_admin_req);
+ writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
+ mpi3mr_writeq(mrioc->admin_req_dma,
+ &mrioc->sysif_regs->admin_request_queue_address);
+ mpi3mr_writeq(mrioc->admin_reply_dma,
+ &mrioc->sysif_regs->admin_reply_queue_address);
+ writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
+ writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ return retval;
+
+out_failed:
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_issue_iocfacts - Send IOC Facts
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Issue IOC Facts MPI request through admin queue and wait for
+ * the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ struct mpi3_ioc_facts_request iocfacts_req;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*facts_data);
+ int retval = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+
+ if (!data) {
+ retval = -1;
+ goto out;
+ }
+
+ memset(&iocfacts_req, 0, sizeof(iocfacts_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
+
+ mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
+ data_dma);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
+ sizeof(iocfacts_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ memcpy(facts_data, (u8 *)data, data_len);
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_reset_dma_mask - Check and set DMA mask
+ * @mrioc: Adapter instance reference
+ *
+ * Check whether the new DMA mask requested through IOCFacts by
+ * the firmware needs to be set; if so, set it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ int r;
+ u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
+
+ if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
+ return 0;
+
+ ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
+ mrioc->dma_mask, facts_dma_mask);
+
+ r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
+ if (r) {
+ ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
+ facts_dma_mask, r);
+ return r;
+ }
+ mrioc->dma_mask = facts_dma_mask;
+ return r;
+}
+
+/**
+ * mpi3mr_process_factsdata - Process IOC facts data
+ * @mrioc: Adapter instance reference
+ * @facts_data: Cached IOC facts data
+ *
+ * Convert the IOC facts data into CPU endianness and cache it in
+ * the driver.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data)
+{
+ u32 ioc_config, req_sz, facts_flags;
+
+ if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
+ (sizeof(*facts_data) / 4)) {
+ ioc_warn(mrioc,
+ "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
+ sizeof(*facts_data),
+ le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
+ }
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
+ if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
+ ioc_err(mrioc,
+ "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
+ req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
+ }
+
+ memset(&mrioc->facts, 0, sizeof(mrioc->facts));
+
+ facts_flags = le32_to_cpu(facts_data->flags);
+ mrioc->facts.op_req_sz = req_sz;
+ mrioc->op_reply_desc_sz = 1 << ((ioc_config &
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
+ MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
+
+ mrioc->facts.ioc_num = facts_data->ioc_number;
+ mrioc->facts.who_init = facts_data->who_init;
+ mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
+ mrioc->facts.personality = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
+ mrioc->facts.dma_mask = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.protocol_flags = facts_data->protocol_flags;
+ mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
+ mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
+ mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
+ mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
+ mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
+ mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
+ mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds);
+ mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
+ mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
+ mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds);
+ mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds);
+ mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
+ mrioc->facts.max_pcie_switches =
+ le16_to_cpu(facts_data->max_pc_ie_switches);
+ mrioc->facts.max_sasexpanders =
+ le16_to_cpu(facts_data->max_sas_expanders);
+ mrioc->facts.max_sasinitiators =
+ le16_to_cpu(facts_data->max_sas_initiators);
+ mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
+ mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
+ mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
+ mrioc->facts.max_op_req_q =
+ le16_to_cpu(facts_data->max_operational_request_queues);
+ mrioc->facts.max_op_reply_q =
+ le16_to_cpu(facts_data->max_operational_reply_queues);
+ mrioc->facts.ioc_capabilities =
+ le32_to_cpu(facts_data->ioc_capabilities);
+ mrioc->facts.fw_ver.build_num =
+ le16_to_cpu(facts_data->fw_version.build_num);
+ mrioc->facts.fw_ver.cust_id =
+ le16_to_cpu(facts_data->fw_version.customer_id);
+ mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
+ mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
+ mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
+ mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
+ mrioc->msix_count = min_t(int, mrioc->msix_count,
+ mrioc->facts.max_msix_vectors);
+ mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
+ mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
+ mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
+ mrioc->facts.shutdown_timeout =
+ le16_to_cpu(facts_data->shutdown_timeout);
+
+ ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
+ mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
+ mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
+ ioc_info(mrioc,
+ "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
+ mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
+ mrioc->facts.max_pds, mrioc->facts.max_msix_vectors,
+ mrioc->facts.max_perids);
+ ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
+ mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
+ mrioc->facts.sge_mod_shift);
+ ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
+ mrioc->facts.dma_mask, (facts_flags &
+ MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+
+ mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
+
+ if (reset_devices)
+ mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
+ MPI3MR_HOST_IOS_KDUMP);
+}
+
+/**
+ * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate and initialize the reply free buffers, sense
+ * buffers, reply free queue and sense buffer queue.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+ dma_addr_t phy_addr;
+
+ if (mrioc->init_cmds.reply)
+ goto post_reply_sbuf;
+
+ mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ if (!mrioc->init_cmds.reply)
+ goto out_failed;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->dev_rmhs_cmds[i].reply)
+ goto out_failed;
+ }
+
+ mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ if (!mrioc->host_tm_cmds.reply)
+ goto out_failed;
+
+ mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+ if (mrioc->facts.max_devhandle % 8)
+ mrioc->dev_handle_bitmap_sz++;
+ mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->removepend_bitmap)
+ goto out_failed;
+
+ mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
+ if (MPI3MR_NUM_DEVRMCMD % 8)
+ mrioc->devrem_bitmap_sz++;
+ mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->devrem_bitmap)
+ goto out_failed;
+
+ mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
+ mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
+ mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
+ mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
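+ /*
+ * Both free queues are sized one entry larger than the number of
+ * buffers they track, presumably so the producer index can never
+ * catch up with the consumer index; the extra slot stays zeroed.
+ */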
+
+ /* reply buffer pool, 16 byte align */
+ sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->reply_buf_pool) {
+ ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
+ &mrioc->reply_buf_dma);
+ if (!mrioc->reply_buf)
+ goto out_failed;
+
+ mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
+
+ /* reply free queue, 8 byte align */
+ sz = mrioc->reply_free_qsz * 8;
+ mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->reply_free_q_pool) {
+ ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
+ GFP_KERNEL, &mrioc->reply_free_q_dma);
+ if (!mrioc->reply_free_q)
+ goto out_failed;
+
+ /* sense buffer pool, 4 byte align */
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
+ &mrioc->pdev->dev, sz, 4, 0);
+ if (!mrioc->sense_buf_pool) {
+ ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
+ &mrioc->sense_buf_dma);
+ if (!mrioc->sense_buf)
+ goto out_failed;
+
+ /* sense buffer queue, 8 byte align */
+ sz = mrioc->sense_buf_q_sz * 8;
+ mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
+ &mrioc->pdev->dev, sz, 8, 0);
+ if (!mrioc->sense_buf_q_pool) {
+ ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+ mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
+ GFP_KERNEL, &mrioc->sense_buf_q_dma);
+ if (!mrioc->sense_buf_q)
+ goto out_failed;
+
+post_reply_sbuf:
+ sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ ioc_info(mrioc,
+ "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
+ (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
+ sz = mrioc->reply_free_qsz * 8;
+ ioc_info(mrioc,
+ "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
+ mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
+ (unsigned long long)mrioc->reply_free_q_dma);
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ ioc_info(mrioc,
+ "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
+ (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
+ sz = mrioc->sense_buf_q_sz * 8;
+ ioc_info(mrioc,
+ "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
+ mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
+ (unsigned long long)mrioc->sense_buf_q_dma);
+
+ /* initialize Reply buffer Queue */
+ for (i = 0, phy_addr = mrioc->reply_buf_dma;
+ i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
+ mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
+ mrioc->reply_free_q[i] = cpu_to_le64(0);
+
+ /* initialize Sense Buffer Queue */
+ for (i = 0, phy_addr = mrioc->sense_buf_dma;
+ i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ)
+ mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
+ mrioc->sense_buf_q[i] = cpu_to_le64(0);
+ return retval;
+
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpi3mr_issue_iocinit - Send IOC Init
+ * @mrioc: Adapter instance reference
+ *
+ * Issue IOC Init MPI request through admin queue and wait for
+ * the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ioc_init_request iocinit_req;
+ struct mpi3_driver_info_layout *drv_info;
+ dma_addr_t data_dma;
+ u32 data_len = sizeof(*drv_info);
+ int retval = 0;
+ ktime_t current_time;
+
+ drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!drv_info) {
+ retval = -1;
+ goto out;
+ }
+ drv_info->information_length = cpu_to_le32(data_len);
+ strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
+ strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
+ drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0;
+ strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
+ drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0;
+ strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
+ strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
+ strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date));
+ drv_info->driver_capabilities = 0;
+ memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
+ sizeof(mrioc->driver_info));
+
+ memset(&iocinit_req, 0, sizeof(iocinit_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
+ iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
+ iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
+ iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
+ iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
+ iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
+ iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
+ iocinit_req.reply_free_queue_address =
+ cpu_to_le64(mrioc->reply_free_q_dma);
+ iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ);
+ iocinit_req.sense_buffer_free_queue_depth =
+ cpu_to_le16(mrioc->sense_buf_q_sz);
+ iocinit_req.sense_buffer_free_queue_address =
+ cpu_to_le64(mrioc->sense_buf_q_dma);
+ iocinit_req.driver_information_address = cpu_to_le64(data_dma);
+
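+	/* supply the current wall-clock time in milliseconds as the IOC timestamp */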
+ current_time = ktime_get_real();
+ iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
+ sizeof(iocinit_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "Issue IOCInit: command timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (drv_info)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
+ data_dma);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_unmask_events - Unmask events in event mask bitmap
+ * @mrioc: Adapter instance reference
+ * @event: MPI event ID
+ *
+ * Unmask the specified event by clearing its bit in the
+ * event_mask bitmap.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
+{
+ u32 desired_event;
+ u8 word;
+
+ if (event >= 128)
+ return;
+
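+	/* event_masks[] is an array of 32-bit words, one bit per MPI event */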
+ desired_event = (1 << (event % 32));
+ word = event / 32;
+
+ mrioc->event_masks[word] &= ~desired_event;
+}
+
+/**
+ * mpi3mr_issue_event_notification - Send event notification
+ * @mrioc: Adapter instance reference
+ *
+ * Issue event notification MPI request through admin queue and
+ * wait for the completion of it or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_event_notification_request evtnotify_req;
+ int retval = 0;
+ u8 i;
+
+ memset(&evtnotify_req, 0, sizeof(evtnotify_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ evtnotify_req.event_masks[i] =
+ cpu_to_le32(mrioc->event_masks[i]);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
+ sizeof(evtnotify_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_send_event_ack - Send event acknowledgment
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event ID
+ * @event_ctx: Event context
+ *
+ * Send event acknowledgment through admin queue and wait for
+ * it to complete.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->init_cmds.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_alloc_chain_bufs - Allocate chain buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Allocate chain buffers and set a bitmap to indicate free
+ * chain buffers. Chain buffers are used to pass the SGE
+ * information along with MPI3 SCSI IO requests for host I/O.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 sz, i;
+ u16 num_chains;
+
+ num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
+
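+	/*
+	 * When DIX protection is enabled, reserve extra chain frames since
+	 * protection SGEs may require chains of their own.
+	 */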
+ if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
+ | SHOST_DIX_TYPE1_PROTECTION
+ | SHOST_DIX_TYPE2_PROTECTION
+ | SHOST_DIX_TYPE3_PROTECTION))
+ num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
+
+ mrioc->chain_buf_count = num_chains;
+ sz = sizeof(struct chain_element) * num_chains;
+ mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->chain_sgl_list)
+ goto out_failed;
+
+ sz = MPI3MR_PAGE_SIZE_4K;
+ mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
+ &mrioc->pdev->dev, sz, 16, 0);
+ if (!mrioc->chain_buf_pool) {
+ ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
+ goto out_failed;
+ }
+
+ for (i = 0; i < num_chains; i++) {
+ mrioc->chain_sgl_list[i].addr =
+ dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
+ &mrioc->chain_sgl_list[i].dma_addr);
+
+ if (!mrioc->chain_sgl_list[i].addr)
+ goto out_failed;
+ }
+ mrioc->chain_bitmap_sz = num_chains / 8;
+ if (num_chains % 8)
+ mrioc->chain_bitmap_sz++;
+ mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
+ if (!mrioc->chain_bitmap)
+ goto out_failed;
+ return retval;
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpi3mr_port_enable_complete - Mark port enable complete
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Callback for the asynchronous port enable request; it sets the
+ * driver command state to indicate the port enable request is complete.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ mrioc->scan_failed = drv_cmd->ioc_status;
+ mrioc->scan_started = 0;
+}
+
+/**
+ * mpi3mr_issue_port_enable - Issue Port Enable
+ * @mrioc: Adapter instance reference
+ * @async: Flag to wait for completion or not
+ *
+ * Issue Port Enable MPI request through admin queue and if the
+ * async flag is not set wait for the completion of the port
+ * enable or time out.
+ *
+ * Return: 0 on success, non-zero on failures.
+ */
+int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
+{
+ struct mpi3_port_enable_request pe_req;
+ int retval = 0;
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+
+ memset(&pe_req, 0, sizeof(pe_req));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ if (async) {
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
+ } else {
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ init_completion(&mrioc->init_cmds.done);
+ }
+ pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
+
+ retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
+ goto out_unlock;
+ }
+ if (!async) {
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (pe_timeout * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "Issue PortEnable: command timed out\n");
+ retval = -1;
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->unrecoverable = 1;
+ goto out_unlock;
+ }
+ mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+ }
+out_unlock:
+ mutex_unlock(&mrioc->init_cmds.mutex);
+out:
+ return retval;
+}
+
+/* Protocol type to name mapper structure */
+static const struct {
+ u8 protocol;
+ char *name;
+} mpi3mr_protocols[] = {
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
+ { MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
+ { MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
+};
+
+/* Capability to name mapper structure */
+static const struct {
+ u32 capability;
+ char *name;
+} mpi3mr_capabilities[] = {
+ { MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
+};
+
+/**
+ * mpi3mr_print_ioc_info - Display controller information
+ * @mrioc: Adapter instance reference
+ *
+ * Display the controller personality, capabilities and
+ * supported protocols.
+ *
+ * Return: Nothing
+ */
+static void
+mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
+{
+ int i = 0, bytes_wrote = 0;
+ char personality[16];
+ char protocol[50] = {0};
+ char capabilities[100] = {0};
+ bool is_string_nonempty = false;
+ struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+ switch (mrioc->facts.personality) {
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
+ strncpy(personality, "Enhanced HBA", sizeof(personality));
+ break;
+ case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
+ strncpy(personality, "RAID", sizeof(personality));
+ break;
+ default:
+ strncpy(personality, "Unknown", sizeof(personality));
+ break;
+ }
+
+ ioc_info(mrioc, "Running in %s Personality", personality);
+
+ ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
+ fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+ fwver->ph_minor, fwver->cust_id, fwver->build_num);
+
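+	/* build a comma-separated list of the supported protocols */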
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
+ if (mrioc->facts.protocol_flags &
+ mpi3mr_protocols[i].protocol) {
+ if (is_string_nonempty &&
+ (bytes_wrote < sizeof(protocol)))
+ bytes_wrote += snprintf(protocol + bytes_wrote,
+ (sizeof(protocol) - bytes_wrote), ",");
+
+ if (bytes_wrote < sizeof(protocol))
+ bytes_wrote += snprintf(protocol + bytes_wrote,
+ (sizeof(protocol) - bytes_wrote), "%s",
+ mpi3mr_protocols[i].name);
+ is_string_nonempty = true;
+ }
+ }
+
+ bytes_wrote = 0;
+ is_string_nonempty = false;
+ for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
+ if (mrioc->facts.ioc_capabilities &
+ mpi3mr_capabilities[i].capability) {
+ if (is_string_nonempty &&
+ (bytes_wrote < sizeof(capabilities)))
+ bytes_wrote += snprintf(capabilities + bytes_wrote,
+ (sizeof(capabilities) - bytes_wrote), ",");
+
+ if (bytes_wrote < sizeof(capabilities))
+ bytes_wrote += snprintf(capabilities + bytes_wrote,
+ (sizeof(capabilities) - bytes_wrote), "%s",
+ mpi3mr_capabilities[i].name);
+ is_string_nonempty = true;
+ }
+ }
+
+ ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
+ protocol, capabilities);
+}
+
+/**
+ * mpi3mr_cleanup_resources - Free PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Unmap PCI device memory and disable PCI device.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+
+ mpi3mr_cleanup_isr(mrioc);
+
+ if (mrioc->sysif_regs) {
+ iounmap((void __iomem *)mrioc->sysif_regs);
+ mrioc->sysif_regs = NULL;
+ }
+
+ if (pci_is_enabled(pdev)) {
+ if (mrioc->bars)
+ pci_release_selected_regions(pdev, mrioc->bars);
+ pci_disable_device(pdev);
+ }
+}
+
+/**
+ * mpi3mr_setup_resources - Enable PCI resources
+ * @mrioc: Adapter instance reference
+ *
+ * Enable PCI device memory, MSI-x registers and set DMA mask.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
+{
+ struct pci_dev *pdev = mrioc->pdev;
+ u32 memap_sz = 0;
+ int i, retval = 0, capb = 0;
+ u16 message_control;
+ u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
+ (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
+ (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+
+ if (pci_enable_device_mem(pdev)) {
+ ioc_err(mrioc, "pci_enable_device_mem: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (!capb) {
+ ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+ mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_request_selected_regions(pdev, mrioc->bars,
+ mrioc->driver_name)) {
+ ioc_err(mrioc, "pci_request_selected_regions: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
+ memap_sz = pci_resource_len(pdev, i);
+ mrioc->sysif_regs =
+ ioremap(mrioc->sysif_regs_phys, memap_sz);
+ break;
+ }
+ }
+
+ pci_set_master(pdev);
+
+ retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ if (retval) {
+ if (dma_mask != DMA_BIT_MASK(32)) {
+ ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
+ dma_mask = DMA_BIT_MASK(32);
+ retval = dma_set_mask_and_coherent(&pdev->dev,
+ dma_mask);
+ }
+ if (retval) {
+ mrioc->dma_mask = 0;
+ ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
+ goto out_failed;
+ }
+ }
+ mrioc->dma_mask = dma_mask;
+
+ if (!mrioc->sysif_regs) {
+ ioc_err(mrioc,
+ "Unable to map adapter memory or resource not found\n");
+ retval = -EINVAL;
+ goto out_failed;
+ }
+
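+	/* the MSI-X Message Control Table Size field encodes (vector count - 1) */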
+ pci_read_config_word(pdev, capb + 2, &message_control);
+ mrioc->msix_count = (message_control & 0x3FF) + 1;
+
+ pci_save_state(pdev);
+
+ pci_set_drvdata(pdev, mrioc->shost);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ (unsigned long long)mrioc->sysif_regs_phys,
+ mrioc->sysif_regs, memap_sz);
+ ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
+ mrioc->msix_count);
+ return retval;
+
+out_failed:
+ mpi3mr_cleanup_resources(mrioc);
+ return retval;
+}
+
+/**
+ * mpi3mr_init_ioc - Initialize the controller
+ * @mrioc: Adapter instance reference
+ * @re_init: Flag to indicate whether this is a fresh init or a re-init
+ *
+ * This is the controller initialization routine, executed either
+ * after a soft reset or from the PCI probe callback.
+ * Set up the required resources, memory map the controller
+ * registers, create admin and operational reply queue pairs,
+ * allocate the required memory for the reply pool and sense
+ * buffer pool, issue the IOC init request to the firmware,
+ * unmask the events and issue port enable to discover
+ * SAS/SATA/NVMe devices and RAID volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+{
+ int retval = 0;
+ enum mpi3mr_iocstate ioc_state;
+ u64 base_info;
+ u32 timeout;
+ u32 ioc_status, ioc_config, i;
+ struct mpi3_ioc_facts_data facts_data;
+
+ mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
+ mrioc->change_count = 0;
+ if (!re_init) {
+ mrioc->cpu_count = num_online_cpus();
+ retval = mpi3mr_setup_resources(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup resources:error %d\n",
+ retval);
+ goto out_nocleanup;
+ }
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ ioc_info(mrioc, "SOD status %x configuration %x\n",
+ ioc_status, ioc_config);
+
+ base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
+ ioc_info(mrioc, "SOD base_info %llx\n", base_info);
+
+ /* The timeout value is in units of 2 seconds; convert it to seconds */
+ mrioc->ready_timeout =
+ ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+ MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+
+ ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc, "IOC in %s state during detection\n",
+ mpi3mr_iocstate_name(ioc_state));
+
+ if (ioc_state == MRIOC_STATE_BECOMING_READY ||
+ ioc_state == MRIOC_STATE_RESET_REQUESTED) {
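+		/* wait out the full ready timeout in 100ms steps before re-reading the state */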
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ msleep(100);
+ } while (--timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc,
+ "IOC in %s state after waiting for reset time\n",
+ mpi3mr_iocstate_name(ioc_state));
+ }
+
+ if (ioc_state == MRIOC_STATE_READY) {
+ retval = mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc, "Failed to MU reset IOC error %d\n",
+ retval);
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ }
+ if (ioc_state != MRIOC_STATE_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to soft reset IOC error %d\n",
+ __func__, retval);
+ goto out_failed;
+ }
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_RESET) {
+ retval = -1;
+ ioc_err(mrioc, "Cannot bring IOC to reset state\n");
+ goto out_failed;
+ }
+
+ retval = mpi3mr_setup_admin_qpair(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup admin Qs: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed;
+ }
+ } else
+ mpi3mr_ioc_enable_intr(mrioc);
+
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ mpi3mr_process_factsdata(mrioc, &facts_data);
+ if (!re_init) {
+ retval = mpi3mr_check_reset_dma_mask(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Resetting dma mask failed %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ mpi3mr_print_ioc_info(mrioc);
+
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
+ retval);
+ goto out_failed;
+ }
+ mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+
+ mrioc->sbq_host_index = mrioc->num_sense_bufs;
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
+
+ if (!re_init) {
+ retval = mpi3mr_setup_isr(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to create OpQueues error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (re_init &&
+ (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
+ retval = -1;
+ ioc_err(mrioc,
+ "Cannot create minimum number of OpQueues expected:%d created:%d\n",
+ mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
+ goto out_failed;
+ }
+
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue event notification %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (re_init) {
+ ioc_info(mrioc, "Issuing Port Enable\n");
+ retval = mpi3mr_issue_port_enable(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue port enable %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+ return retval;
+
+out_failed:
+ mpi3mr_cleanup_ioc(mrioc, re_init);
+out_nocleanup:
+ return retval;
+}
+
+/**
+ * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational reply queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_reply_q->q_segments)
+ return;
+
+ size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
+ segments = op_reply_q->q_segments;
+ for (i = 0; i < op_reply_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
+ * segments
+ * @mrioc: Adapter instance reference
+ * @qidx: Operational request queue index
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
+{
+ struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
+ struct segments *segments;
+ int i, size;
+
+ if (!op_req_q->q_segments)
+ return;
+
+ size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
+ segments = op_req_q->q_segments;
+ for (i = 0; i < op_req_q->num_segments; i++)
+ memset(segments[i].segment, 0, size);
+}
+
+/**
+ * mpi3mr_memset_buffers - memset memory for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * Clear all the memory allocated for a controller, typically
+ * called post reset to reuse the memory allocated during the
+ * controller init.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+ memset(mrioc->host_tm_cmds.reply, 0,
+ sizeof(*mrioc->host_tm_cmds.reply));
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ memset(mrioc->dev_rmhs_cmds[i].reply, 0,
+ sizeof(*mrioc->dev_rmhs_cmds[i].reply));
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ mrioc->op_reply_qinfo[i].qid = 0;
+ mrioc->op_reply_qinfo[i].ci = 0;
+ mrioc->op_reply_qinfo[i].num_replies = 0;
+ mrioc->op_reply_qinfo[i].ephase = 0;
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
+ mpi3mr_memset_op_reply_q_buffers(mrioc, i);
+
+ mrioc->req_qinfo[i].ci = 0;
+ mrioc->req_qinfo[i].pi = 0;
+ mrioc->req_qinfo[i].num_requests = 0;
+ mrioc->req_qinfo[i].qid = 0;
+ mrioc->req_qinfo[i].reply_qid = 0;
+ spin_lock_init(&mrioc->req_qinfo[i].q_lock);
+ mpi3mr_memset_op_req_q_buffers(mrioc, i);
+ }
+}
+
+/**
+ * mpi3mr_free_mem - Free memory allocated for a controller
+ * @mrioc: Adapter instance reference
+ *
+ * Free all the memory allocated for a controller.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ struct mpi3mr_intr_info *intr_info;
+
+ if (mrioc->sense_buf_pool) {
+ if (mrioc->sense_buf)
+ dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
+ mrioc->sense_buf_dma);
+ dma_pool_destroy(mrioc->sense_buf_pool);
+ mrioc->sense_buf = NULL;
+ mrioc->sense_buf_pool = NULL;
+ }
+ if (mrioc->sense_buf_q_pool) {
+ if (mrioc->sense_buf_q)
+ dma_pool_free(mrioc->sense_buf_q_pool,
+ mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
+ dma_pool_destroy(mrioc->sense_buf_q_pool);
+ mrioc->sense_buf_q = NULL;
+ mrioc->sense_buf_q_pool = NULL;
+ }
+
+ if (mrioc->reply_buf_pool) {
+ if (mrioc->reply_buf)
+ dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
+ mrioc->reply_buf_dma);
+ dma_pool_destroy(mrioc->reply_buf_pool);
+ mrioc->reply_buf = NULL;
+ mrioc->reply_buf_pool = NULL;
+ }
+ if (mrioc->reply_free_q_pool) {
+ if (mrioc->reply_free_q)
+ dma_pool_free(mrioc->reply_free_q_pool,
+ mrioc->reply_free_q, mrioc->reply_free_q_dma);
+ dma_pool_destroy(mrioc->reply_free_q_pool);
+ mrioc->reply_free_q = NULL;
+ mrioc->reply_free_q_pool = NULL;
+ }
+
+ for (i = 0; i < mrioc->num_op_req_q; i++)
+ mpi3mr_free_op_req_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ mpi3mr_free_op_reply_q_segments(mrioc, i);
+
+ for (i = 0; i < mrioc->intr_info_count; i++) {
+ intr_info = mrioc->intr_info + i;
+ intr_info->op_reply_q = NULL;
+ }
+
+ kfree(mrioc->req_qinfo);
+ mrioc->req_qinfo = NULL;
+ mrioc->num_op_req_q = 0;
+
+ kfree(mrioc->op_reply_qinfo);
+ mrioc->op_reply_qinfo = NULL;
+ mrioc->num_op_reply_q = 0;
+
+ kfree(mrioc->init_cmds.reply);
+ mrioc->init_cmds.reply = NULL;
+
+ kfree(mrioc->host_tm_cmds.reply);
+ mrioc->host_tm_cmds.reply = NULL;
+
+ kfree(mrioc->removepend_bitmap);
+ mrioc->removepend_bitmap = NULL;
+
+ kfree(mrioc->devrem_bitmap);
+ mrioc->devrem_bitmap = NULL;
+
+ kfree(mrioc->chain_bitmap);
+ mrioc->chain_bitmap = NULL;
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ kfree(mrioc->dev_rmhs_cmds[i].reply);
+ mrioc->dev_rmhs_cmds[i].reply = NULL;
+ }
+
+ if (mrioc->chain_buf_pool) {
+ for (i = 0; i < mrioc->chain_buf_count; i++) {
+ if (mrioc->chain_sgl_list[i].addr) {
+ dma_pool_free(mrioc->chain_buf_pool,
+ mrioc->chain_sgl_list[i].addr,
+ mrioc->chain_sgl_list[i].dma_addr);
+ mrioc->chain_sgl_list[i].addr = NULL;
+ }
+ }
+ dma_pool_destroy(mrioc->chain_buf_pool);
+ mrioc->chain_buf_pool = NULL;
+ }
+
+ kfree(mrioc->chain_sgl_list);
+ mrioc->chain_sgl_list = NULL;
+
+ if (mrioc->admin_reply_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
+ mrioc->admin_reply_base, mrioc->admin_reply_dma);
+ mrioc->admin_reply_base = NULL;
+ }
+ if (mrioc->admin_req_base) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
+ mrioc->admin_req_base, mrioc->admin_req_dma);
+ mrioc->admin_req_base = NULL;
+ }
+}
+
+/**
+ * mpi3mr_issue_ioc_shutdown - shutdown controller
+ * @mrioc: Adapter instance reference
+ *
+ * Send a shutdown notification to the controller and wait up to
+ * shutdown_timeout for it to complete.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
+{
+ u32 ioc_config, ioc_status;
+ u8 retval = 1;
+ u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
+
+ ioc_info(mrioc, "Issuing shutdown Notification\n");
+ if (mrioc->unrecoverable) {
+ ioc_warn(mrioc,
+ "IOC is unrecoverable shutdown is not issued\n");
+ return;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
+ ioc_info(mrioc, "shutdown already in progress\n");
+ return;
+ }
+
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;
+
+ writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
+
+ if (mrioc->facts.shutdown_timeout)
+ timeout = mrioc->facts.shutdown_timeout * 10;
+
+ do {
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
+ retval = 0;
+ break;
+ }
+ msleep(100);
+ } while (--timeout);
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+
+ if (retval) {
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
+ == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
+ ioc_warn(mrioc,
+ "shutdown still in progress after timeout\n");
+ }
+
+ ioc_info(mrioc,
+ "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ (!retval) ? "successful" : "failed", ioc_status,
+ ioc_config);
+}
+
+/**
+ * mpi3mr_cleanup_ioc - Cleanup controller
+ * @mrioc: Adapter instance reference
+ * @re_init: Cleanup due to a reinit or not
+ *
+ * Controller cleanup handler: a message unit reset or soft reset
+ * and a shutdown notification are issued to the controller, and
+ * the associated memory resources are freed.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
+{
+ enum mpi3mr_iocstate ioc_state;
+
+ if (!re_init)
+ mpi3mr_stop_watchdog(mrioc);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+
+ if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
+ (ioc_state == MRIOC_STATE_READY)) {
+ if (mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP))
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_MUR_FAILURE);
+
+ if (!re_init)
+ mpi3mr_issue_ioc_shutdown(mrioc);
+ }
+
+ if (!re_init) {
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+ }
+}
+
+/**
+ * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
+ * @mrioc: Adapter instance reference
+ * @cmdptr: Internal command tracker
+ *
+ * Complete an internal driver command with state indicating it
+ * was completed due to reset.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *cmdptr)
+{
+ if (cmdptr->state & MPI3MR_CMD_PENDING) {
+ cmdptr->state |= MPI3MR_CMD_RESET;
+ cmdptr->state &= ~MPI3MR_CMD_PENDING;
+ if (cmdptr->is_waiting) {
+ complete(&cmdptr->done);
+ cmdptr->is_waiting = 0;
+ } else if (cmdptr->callback)
+ cmdptr->callback(mrioc, cmdptr);
+ }
+}
+
+/**
+ * mpi3mr_flush_drv_cmds - Flush internal driver commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all internal driver commands post reset
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_drv_cmd *cmdptr;
+ u8 i;
+
+ cmdptr = &mrioc->init_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ cmdptr = &mrioc->host_tm_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
+ cmdptr = &mrioc->dev_rmhs_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ }
+}
+
+/**
+ * mpi3mr_diagfault_reset_handler - Diag fault reset handler
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ *
+ * This is a handler for issuing a diag fault reset from
+ * applications through the IOCTL path to stop the execution of
+ * the controller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason)
+{
+ int retval = 0;
+
+ ioc_info(mrioc, "Entry: reason code: %s\n",
+ mpi3mr_reset_rc_name(reset_reason));
+ mrioc->reset_in_progress = 1;
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+
+ if (retval) {
+ ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
+ reset_reason);
+ mpi3mr_ioc_enable_intr(mrioc);
+ }
+ ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
+ mrioc->reset_in_progress = 0;
+ return retval;
+}
+
+/**
+ * mpi3mr_soft_reset_handler - Reset the controller
+ * @mrioc: Adapter instance reference
+ * @reset_reason: Reset reason code
+ * @snapdump: Flag to generate snapdump in firmware or not
+ *
+ * This is a handler for recovering the controller by issuing a
+ * soft reset or a diag fault reset. This is a blocking function;
+ * while one reset is executing, any further reset requests are
+ * blocked. All IOCTLs/IO will be blocked during the reset. If the
+ * controller reset is successful then the controller will be
+ * reinitialized, otherwise the controller will be marked as not
+ * recoverable.
+ *
+ * If the snapdump bit is set, the controller is issued a diag
+ * fault reset so that the firmware can create a snapdump; after
+ * that, the firmware raises an F000 fault and the driver issues a
+ * soft reset to recover from it.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ u32 reset_reason, u8 snapdump)
+{
+ int retval = 0, i;
+ unsigned long flags;
+ u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+
+ if (mrioc->fault_dbg) {
+ if (snapdump)
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_kill_ioc(mrioc, reset_reason);
+ }
+
+ /*
+ * Block new resets until the currently executing one is finished and
+ * return the status of the existing reset for all blocked resets
+ */
+ if (!mutex_trylock(&mrioc->reset_mutex)) {
+ ioc_info(mrioc, "Another reset in progress\n");
+ return -1;
+ }
+ mrioc->reset_in_progress = 1;
+
+ if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
+ (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+
+ if (retval) {
+ ioc_err(mrioc,
+ "Failed to turn off events prior to reset %d\n",
+ retval);
+ }
+ }
+
+ mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
+
+ mpi3mr_ioc_disable_intr(mrioc);
+
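+	/*
+	 * With the diag-save bit set, a diag fault reset lets the firmware
+	 * capture a snapdump; poll until the save completes before issuing
+	 * the soft reset below.
+	 */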
+ if (snapdump) {
+ mpi3mr_set_diagsave(mrioc);
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ if (!retval) {
+ do {
+ host_diagnostic =
+ readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic &
+ MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+ }
+ }
+
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
+ if (retval) {
+ ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
+ goto out;
+ }
+
+ mpi3mr_flush_delayed_rmhs_list(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ mpi3mr_flush_host_io(mrioc);
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_memset_buffers(mrioc);
+ retval = mpi3mr_init_ioc(mrioc, 1);
+ if (retval) {
+ pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
+ mrioc->name, reset_reason);
+ goto out;
+ }
+ ssleep(10);
+
+out:
+ if (!retval) {
+ mrioc->reset_in_progress = 0;
+ scsi_unblock_requests(mrioc->shost);
+ mpi3mr_rfresh_tgtdevs(mrioc);
+ mrioc->ts_update_counter = 0;
+ spin_lock_irqsave(&mrioc->watchdog_lock, flags);
+ if (mrioc->watchdog_work_q)
+ queue_delayed_work(mrioc->watchdog_work_q,
+ &mrioc->watchdog_work,
+ msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
+ spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ } else {
+ mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ mrioc->unrecoverable = 1;
+ mrioc->reset_in_progress = 0;
+ retval = -1;
+ }
+
+ mutex_unlock(&mrioc->reset_mutex);
+ ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
+ return retval;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
new file mode 100644
index 000000000000..40676155e62d
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -0,0 +1,4045 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2021 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+/* global driver scope variables */
+LIST_HEAD(mrioc_list);
+DEFINE_SPINLOCK(mrioc_list_lock);
+static int mrioc_ids;
+static int warn_non_secure_ctlr;
+
+MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
+MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
+MODULE_VERSION(MPI3MR_DRIVER_VERSION);
+
+/* Module parameters*/
+int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");
+
+static int prot_guard_mask = 3;
+module_param(prot_guard_mask, int, 0);
+MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
+static int logging_level;
+module_param(logging_level, int, 0);
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+/* Forward declarations*/
+/**
+ * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Calculate the host tag based on block tag for a given scmd.
+ *
+ * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
+ */
+static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag;
+ u16 host_tag, hw_queue;
+
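+	/* the block layer unique tag encodes both the hw queue index and the per-queue tag */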
+ unique_tag = blk_mq_unique_tag(scmd->request);
+
+ hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
+ if (hw_queue >= mrioc->num_op_reply_q)
+ return MPI3MR_HOSTTAG_INVALID;
+ host_tag = blk_mq_unique_tag_to_tag(unique_tag);
+
+ if (WARN_ON(host_tag >= mrioc->max_host_ios))
+ return MPI3MR_HOSTTAG_INVALID;
+
+ priv = scsi_cmd_priv(scmd);
+ /* host_tag 0 is invalid hence incrementing by 1 */
+ priv->host_tag = host_tag + 1;
+ priv->scmd = scmd;
+ priv->in_lld_scope = 1;
+ priv->req_q_idx = hw_queue;
+ priv->meta_chain_idx = -1;
+ priv->chain_idx = -1;
+ priv->meta_sg_valid = 0;
+ return priv->host_tag;
+}
+
+/**
+ * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
+ * @mrioc: Adapter instance reference
+ * @host_tag: Host tag
+ * @qidx: Operational queue index
+ *
+ * Identify the block tag from the host tag and queue index and
+ * retrieve associated scsi command using scsi_host_find_tag().
+ *
+ * Return: SCSI command reference or NULL.
+ */
+static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
+ struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
+{
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u32 unique_tag = host_tag - 1;
+
+ if (WARN_ON(host_tag > mrioc->max_host_ios))
+ goto out;
+
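+	/* rebuild the block layer unique tag from the queue index and the zero-based tag */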
+ unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
+
+ scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ scmd = NULL;
+ }
+out:
+ return scmd;
+}
+
+/**
+ * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ *
+ * Invalidate the SCSI command private data to mark that the
+ * command is no longer in LLD scope.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ struct scmd_priv *priv = NULL;
+
+ priv = scsi_cmd_priv(scmd);
+
+ if (WARN_ON(priv->in_lld_scope == 0))
+ return;
+ priv->host_tag = MPI3MR_HOSTTAG_INVALID;
+ priv->req_q_idx = 0xFFFF;
+ priv->scmd = NULL;
+ priv->in_lld_scope = 0;
+ priv->meta_sg_valid = 0;
+ if (priv->chain_idx >= 0) {
+ clear_bit(priv->chain_idx, mrioc->chain_bitmap);
+ priv->chain_idx = -1;
+ }
+ if (priv->meta_chain_idx >= 0) {
+ clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
+ priv->meta_chain_idx = -1;
+ }
+}
+
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
+static void mpi3mr_fwevt_worker(struct work_struct *work);
+
+/**
+ * mpi3mr_fwevt_free - firmware event memory deallocator
+ * @r: kref pointer of the firmware event
+ *
+ * Free the firmware event memory when there are no more references.
+ */
+static void mpi3mr_fwevt_free(struct kref *r)
+{
+ kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
+}
+
+/**
+ * mpi3mr_fwevt_get - kref incrementer
+ * @fwevt: Firmware event reference
+ *
+ * Increment firmware event reference count.
+ */
+static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
+{
+ kref_get(&fwevt->ref_count);
+}
+
+/**
+ * mpi3mr_fwevt_put - kref decrementer
+ * @fwevt: Firmware event reference
+ *
+ * Decrement the firmware event reference count.
+ */
+static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
+{
+ kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
+}
+
+/**
+ * mpi3mr_alloc_fwevt - Allocate firmware event
+ * @len: length of firmware event data to allocate
+ *
+ * Allocate firmware event with required length and initialize
+ * the reference counter.
+ *
+ * Return: firmware event reference.
+ */
+static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
+{
+ struct mpi3mr_fwevt *fwevt;
+
+ fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
+ if (!fwevt)
+ return NULL;
+
+ kref_init(&fwevt->ref_count);
+ return fwevt;
+}
+
+/**
+ * mpi3mr_fwevt_add_to_list - Add firmware event to the list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Add the given firmware event to the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ if (!mrioc->fwevt_worker_thread)
+ return;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ /* get fwevt reference count while adding it to fwevt_list */
+ mpi3mr_fwevt_get(fwevt);
+ INIT_LIST_HEAD(&fwevt->list);
+ list_add_tail(&fwevt->list, &mrioc->fwevt_list);
+ INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
+ /* get fwevt reference count while enqueueing it to worker queue */
+ mpi3mr_fwevt_get(fwevt);
+ queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_fwevt_del_from_list - Delete firmware event from list
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Delete the given firmware event from the firmware event list.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&fwevt->list)) {
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+}
+
+/**
+ * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
+ * @mrioc: Adapter instance reference
+ *
+ * Dequeue a firmware event from the firmware event list.
+ *
+ * Return: firmware event.
+ */
+static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
+ struct mpi3mr_ioc *mrioc)
+{
+ unsigned long flags;
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ if (!list_empty(&mrioc->fwevt_list)) {
+ fwevt = list_first_entry(&mrioc->fwevt_list,
+ struct mpi3mr_fwevt, list);
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+
+ return fwevt;
+}
+
+/**
+ * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all pending firmware events from the firmware event
+ * list.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
+ !mrioc->fwevt_worker_thread)
+ return;
+
+ while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) ||
+ (fwevt = mrioc->current_event)) {
+ /*
+ * Wait on the fwevt to complete. If this returns 1, then
+ * the event was never executed, and we need a put for the
+ * reference the work had on the fwevt.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from mpi3mr_process_fwevt()
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+ }
+}
+
+/**
+ * mpi3mr_invalidate_devhandles - Invalidate device handles
+ * @mrioc: Adapter instance reference
+ *
+ * Invalidate the device handles in the target device structures.
+ * Called post reset prior to reinitializing the controller.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ }
+ }
+}
+
+/**
+ * mpi3mr_print_scmd - print individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ * @reserved: N/A. Currently not used
+ *
+ * Print the SCSI command details if it is in LLD scope.
+ *
+ * Return: true always.
+ */
+static bool mpi3mr_print_scmd(struct request *rq,
+ void *data, bool reserved)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
+ __func__, priv->host_tag, priv->req_q_idx + 1);
+ scsi_print_command(scmd);
+ }
+
+out:
+ return(true);
+}
+
+/**
+ * mpi3mr_flush_scmd - Flush individual SCSI command
+ * @rq: Block request
+ * @data: Adapter instance reference
+ * @reserved: N/A. Currently not used
+ *
+ * Return the SCSI command to the upper layers if it is in LLD
+ * scope.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_flush_scmd(struct request *rq,
+ void *data, bool reserved)
+{
+ struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv = NULL;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+
+ if (priv->meta_sg_valid)
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scmd->result = DID_RESET << 16;
+ scsi_print_command(scmd);
+ scmd->scsi_done(scmd);
+ mrioc->flush_io_count++;
+ }
+
+out:
+ return(true);
+}
+
+/**
+ * mpi3mr_flush_host_io - Flush host I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Flush all of the pending I/Os by calling
+ * blk_mq_tagset_busy_iter() for each possible tag. This is
+ * executed post controller reset
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ mrioc->flush_io_count = 0;
+ ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
+ mrioc->flush_io_count);
+}
+
+/**
+ * mpi3mr_alloc_tgtdev - target device allocator
+ *
+ * Allocate a target device instance and initialize its reference
+ * count.
+ *
+ * Return: target device instance.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
+ if (!tgtdev)
+ return NULL;
+ kref_init(&tgtdev->ref_count);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_tgtdev_add_to_list - Add tgtdev to the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Add the target device to the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ mpi3mr_tgtdev_get(tgtdev);
+ INIT_LIST_HEAD(&tgtdev->list);
+ list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * mpi3mr_tgtdev_del_from_list - Delete tgtdev from the list
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * Remove the target device from the target device list
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve target device from the device handle.
+ * Non Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->dev_handle == handle)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ *
+ * Accessor to retrieve target device from the device handle.
+ * Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve target device from the Persistent ID.
+ * Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if (tgtdev->perst_id == persist_id)
+ goto found_tgtdev;
+ return NULL;
+
+found_tgtdev:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
+ * @mrioc: Adapter instance reference
+ * @persist_id: Persistent ID
+ *
+ * Accessor to retrieve target device from the Persistent ID.
+ * Lock version
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
+ struct mpi3mr_ioc *mrioc, u16 persist_id)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return tgtdev;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
+ * @mrioc: Adapter instance reference
+ * @tgt_priv: Target private data
+ *
+ * Accessor to return target device from the target private
+ * data. Non-lock version.
+ *
+ * Return: Target device reference.
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
+ struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+ tgtdev = tgt_priv->tgt_dev;
+ if (tgtdev)
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device structure
+ *
+ * Checks whether the device is exposed to the upper layers and, if
+ * so, removes the device from the upper layers by calling
+ * scsi_remove_target().
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ }
+
+ if (tgtdev->starget) {
+ scsi_remove_target(&tgtdev->starget->dev);
+ tgtdev->host_exposed = 0;
+ }
+ ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
+ __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
+}
+
+/**
+ * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
+ * @mrioc: Adapter instance reference
+ * @perst_id: Persistent ID of the device
+ *
+ * Checks whether the device can be exposed to the upper layers
+ * and, if it is not already exposed, exposes the device to the
+ * upper layers by calling scsi_scan_target().
+ *
+ * Return: 0 on success, non zero on failure.
+ */
+static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
+ u16 perst_id)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (!tgtdev) {
+ retval = -1;
+ goto out;
+ }
+ if (tgtdev->is_hidden) {
+ retval = -1;
+ goto out;
+ }
+ if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
+ tgtdev->host_exposed = 1;
+ scsi_scan_target(&mrioc->shost->shost_gendev, 0,
+ tgtdev->perst_id,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
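+ /* Scan did not attach a scsi_target; roll back the exposed state */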
+ if (!tgtdev->starget)
+ tgtdev->host_exposed = 0;
+ }
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_change_queue_depth - Change QD callback handler
+ * @sdev: SCSI device reference
+ * @q_depth: Queue depth
+ *
+ * Validate and limit QD and call scsi_change_queue_depth.
+ *
+ * Return: return value of scsi_change_queue_depth
+ */
+static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
+ int q_depth)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ int retval = 0;
+
+ if (!sdev->tagged_supported)
+ q_depth = 1;
+ if (q_depth > shost->can_queue)
+ q_depth = shost->can_queue;
+ else if (!q_depth)
+ q_depth = MPI3MR_DEFAULT_SDEV_QD;
+ retval = scsi_change_queue_depth(sdev, q_depth);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_update_sdev - Update SCSI device information
+ * @sdev: SCSI device reference
+ * @data: target device reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the target specific information into each
+ * SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = (struct mpi3mr_tgt_dev *)data;
+ if (!tgtdev)
+ return;
+
+ mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ /* The block layer hw sector size = 512 */
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgtdev->dev_spec.pcie_inf.mdts / 512);
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
+
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed post controller reset to identify any devices
+ * that went missing during the reset and remove them from the
+ * upper layers, and to expose any newly detected devices to the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
+ tgtdev->host_exposed) {
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+
+ tgtdev = NULL;
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
+ !tgtdev->is_hidden && !tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ }
+}
+
+/**
+ * mpi3mr_update_tgtdev - Update cached target device information
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device internal structure
+ * @dev_pg0: New device page0
+ *
+ * Update the information from the device page0 into the driver
+ * cached target device structure.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
+{
+ u16 flags = 0;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ u8 prot_mask = 0;
+
+ tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ tgtdev->dev_type = dev_pg0->device_form;
+ tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
+ tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
+ tgtdev->slot = le16_to_cpu(dev_pg0->slot);
+ tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
+ tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
+
+ flags = le16_to_cpu(dev_pg0->flags);
+ tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
+ scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
+ scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ }
+
+ switch (tgtdev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_SAS_SATA:
+ {
+ struct mpi3_device0_sas_sata_format *sasinf =
+ &dev_pg0->device_specific.sas_sata_format;
+ u16 dev_info = le16_to_cpu(sasinf->device_info);
+
+ tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
+ tgtdev->dev_spec.sas_sata_inf.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
+ tgtdev->is_hidden = 1;
+ else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
+ MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
+ tgtdev->is_hidden = 1;
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_PCIE:
+ {
+ struct mpi3_device0_pcie_format *pcieinf =
+ &dev_pg0->device_specific.pcie_format;
+ u16 dev_info = le16_to_cpu(pcieinf->device_info);
+
+ tgtdev->dev_spec.pcie_inf.capb =
+ le32_to_cpu(pcieinf->capabilities);
+ tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
+ /* 2^12 = 4096 */
+ tgtdev->dev_spec.pcie_inf.pgsz = 12;
+ if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
+ tgtdev->dev_spec.pcie_inf.mdts =
+ le32_to_cpu(pcieinf->maximum_data_transfer_size);
+ tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
+ tgtdev->dev_spec.pcie_inf.reset_to =
+ pcieinf->controller_reset_to;
+ tgtdev->dev_spec.pcie_inf.abort_to =
+ pcieinf->nv_me_abort_to;
+ }
+ if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
+ tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
+ if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
+ tgtdev->is_hidden = 1;
+ if (mrioc->shost)
+ prot_mask = scsi_host_get_prot(mrioc->shost);
+ if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
+ scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
+ ioc_info(mrioc,
+ "%s : Disabling DIX0 prot capability\n", __func__);
+ ioc_info(mrioc,
+ "because HBA does not support DIX0 operation on NVME drives\n");
+ }
+ break;
+ }
+ case MPI3_DEVICE_DEVFORM_VD:
+ {
+ struct mpi3_device0_vd_format *vdinf =
+ &dev_pg0->device_specific.vd_format;
+
+ tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
+ if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
+ tgtdev->is_hidden = 1;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event information.
+ *
+ * Processes the Device Status Change event and, based on the
+ * device's new state, either exposes the device to the upper
+ * layers or removes the device from the upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ u16 dev_handle = 0;
+ u8 uhide = 0, delete = 0, cleanup = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)fwevt->event_data;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+ ioc_info(mrioc,
+ "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
+ __func__, dev_handle, evtdata->reason_code);
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
+ uhide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ cleanup = 1;
+ break;
+ default:
+ ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
+ evtdata->reason_code);
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (uhide) {
+ tgtdev->is_hidden = 0;
+ if (!tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
+ }
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ if (delete)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ }
+ if (cleanup) {
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: New device page0
+ *
+ * Processes the Device Info Change event and, based on the
+ * device's new information, either exposes the device to the upper
+ * layers, removes the device from the upper layers, or updates the
+ * details of the device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 dev_handle = 0, perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ dev_handle = le16_to_cpu(dev_pg0->dev_handle);
+ ioc_info(mrioc,
+ "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
+ __func__, dev_handle, perst_id);
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ if (!tgtdev->is_hidden && !tgtdev->host_exposed)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ if (tgtdev->is_hidden && tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
+ starget_for_each_device(tgtdev->starget, (void *)tgtdev,
+ mpi3mr_update_sdev);
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_sastopochg_evt_debug - SASTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: SAS topology change list event data
+ *
+ * Prints information about the SAS topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_sas_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u8 reason_code, phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->exp_status) {
+ case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :sas topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->expander_dev_handle),
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_phy_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ phy_number = event_data->start_phy_num + i;
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ ioc_info(mrioc,
+ "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, phy_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the SAS topology change event and
+ * for "not responding" event code, removes the device from the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_sas_topology_change_list *event_data =
+ (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+
+ mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
+ * @mrioc: Adapter instance reference
+ * @event_data: PCIe topology change list event data
+ *
+ * Prints information about the PCIe topology change event.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_data_pcie_topology_change_list *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 port_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->switch_status) {
+ case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
+ status_str = "responding";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
+ status_str = "direct attached";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
+ __func__, status_str);
+ ioc_info(mrioc,
+ "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
+ __func__, le16_to_cpu(event_data->switch_dev_handle),
+ le16_to_cpu(event_data->enclosure_handle),
+ event_data->start_port_num, event_data->num_entries);
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ port_number = event_data->start_port_num + i;
+ reason_code = event_data->port_entry[i].port_status;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ status_str = "link status change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+ status_str = "link status no change";
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->port_entry[i].current_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->port_entry[i].previous_port_info &
+ MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ ioc_info(mrioc,
+ "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
+ __func__, port_number, handle, status_str, link_rate,
+ prev_link_rate);
+ }
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the PCIe topology change event and
+ * for "not responding" event code, removes the device from the
+ * upper layers.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3_event_data_pcie_topology_change_list *event_data =
+ (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+
+ mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
+
+ for (i = 0; i < event_data->num_entries; i++) {
+ handle =
+ le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (!tgtdev)
+ continue;
+
+ reason_code = event_data->port_entry[i].port_status;
+
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ break;
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Identifies the firmware event, calls the corresponding bottom
+ * half handler, and sends an event acknowledgment if required.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ mrioc->current_event = fwevt;
+ mpi3mr_fwevt_del_from_list(mrioc, fwevt);
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ if (!fwevt->process_evt)
+ goto evt_ack;
+
+ switch (fwevt->event_id) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *dev_pg0 =
+ (struct mpi3_device_page0 *)fwevt->event_data;
+ mpi3mr_report_tgtdev_to_host(mrioc,
+ le16_to_cpu(dev_pg0->persistent_id));
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ mpi3mr_devinfochg_evt_bh(mrioc,
+ (struct mpi3_device_page0 *)fwevt->event_data);
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
+ break;
+ }
+ default:
+ break;
+ }
+
+evt_ack:
+ if (fwevt->send_ack)
+ mpi3mr_send_event_ack(mrioc, fwevt->event_id,
+ fwevt->evt_ctx);
+out:
+ /* Put fwevt reference count to neutralize kref_init increment */
+ mpi3mr_fwevt_put(fwevt);
+ mrioc->current_event = NULL;
+}
+
+/**
+ * mpi3mr_fwevt_worker - Firmware event worker
+ * @work: Work struct containing firmware event
+ *
+ * Extracts the firmware event and calls mpi3mr_fwevt_bh.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_fwevt_worker(struct work_struct *work)
+{
+ struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
+ work);
+ mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+}
+
+/**
+ * mpi3mr_create_tgtdev - Create and add a target device
+ * @mrioc: Adapter instance reference
+ * @dev_pg0: Device Page 0 data
+ *
+ * If the device specified by the device page 0 data is not
+ * present in the driver's internal list, allocate the memory
+ * for the device, populate the data and add to the list, else
+ * update the device data. The key is persistent ID.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
+ struct mpi3_device_page0 *dev_pg0)
+{
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ u16 perst_id = 0;
+
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
+ if (tgtdev) {
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ tgtdev = mpi3mr_alloc_tgtdev();
+ if (!tgtdev)
+ return -ENOMEM;
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_flush_delayed_rmhs_list - Flush pending commands
+ * @mrioc: Adapter instance reference
+ *
+ * Flushes pending entries in the delayed removal handshake list as
+ * a cleanup during a controller reset or driver removal.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc)
+{
+ struct delayed_dev_rmhs_node *_rmhs_node;
+
+ while (!list_empty(&mrioc->delayed_rmhs_list)) {
+ _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ list_del(&_rmhs_node->list);
+ kfree(_rmhs_node);
+ }
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issues a target reset TM to the firmware from the device
+ * removal TM pend list or retry the removal handshake sequence
+ * based on the IOU control request IOC status.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo);
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
+ __func__, drv_cmd->dev_handle,
+ drv_cmd->retry_count);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
+ drv_cmd, drv_cmd->iou_rc);
+ return;
+ }
+ ioc_err(mrioc,
+ "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ } else {
+ ioc_info(mrioc,
+ "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
+ }
+
+ if (!list_empty(&mrioc->delayed_rmhs_list)) {
+ delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
+ struct delayed_dev_rmhs_node, list);
+ drv_cmd->dev_handle = delayed_dev_rmhs->handle;
+ drv_cmd->retry_count = 0;
+ drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
+ ioc_info(mrioc,
+ "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
+ __func__, drv_cmd->dev_handle);
+ mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
+ drv_cmd->iou_rc);
+ list_del(&delayed_dev_rmhs->list);
+ kfree(delayed_dev_rmhs);
+ return;
+ }
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Processes the target reset TM completion and issues the IO unit
+ * control request to the firmware as part of the device removal or
+ * hidden acknowledgment handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_iounit_control_request iou_ctrl;
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval;
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ if (tm_reply)
+ pr_info(IOCNAME
+ "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count));
+
+ pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
+ mrioc->name, drv_cmd->dev_handle, cmd_idx);
+
+ memset(&iou_ctrl, 0, sizeof(iou_ctrl));
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
+ iou_ctrl.operation = drv_cmd->iou_rc;
+ iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
+ iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+
+ retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
+ 1);
+ if (retval) {
+ pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
+ mrioc->name);
+ goto out_failed;
+ }
+
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
+ * @mrioc: Adapter instance reference
+ * @handle: Device handle
+ * @cmdparam: Internal command tracker
+ * @iou_rc: IO unit reason code
+ *
+ * Issues a target reset TM to the firmware, or adds the request to
+ * a pending list, as part of the device removal or hidden
+ * acknowledgment handshake.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ int retval = 0;
+ u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ u8 retrycount = 5;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+
+ if (drv_cmd)
+ goto issue_cmd;
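+ /* No command tracker supplied; grab a free device removal command slot */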
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
+ MPI3MR_NUM_DEVRMCMD);
+ if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
+ if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_DEVRMCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
+ delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
+ GFP_ATOMIC);
+ if (!delayed_dev_rmhs)
+ return;
+ INIT_LIST_HEAD(&delayed_dev_rmhs->list);
+ delayed_dev_rmhs->handle = handle;
+ delayed_dev_rmhs->iou_rc = iou_rc;
+ list_add_tail(&delayed_dev_rmhs->list,
+ &mrioc->delayed_rmhs_list);
+ ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
+ __func__, handle);
+ return;
+ }
+ drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
+ ioc_info(mrioc,
+ "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
+ __func__, handle, cmd_idx);
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
+ drv_cmd->dev_handle = handle;
+ drv_cmd->iou_rc = iou_rc;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ set_bit(handle, mrioc->removepend_bitmap);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
+ __func__);
+ goto out_failed;
+ }
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ drv_cmd->retry_count = 0;
+ clear_bit(cmd_idx, mrioc->devrem_bitmap);
+}
+
+/**
+ * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O to
+ * the device, unblocks I/O to the device, or starts the device
+ * removal handshake with the firmware with reason "remove" for
+ * PCIe devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_pcie_topology_change_list *topo_evt =
+ (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->port_entry[i].port_status;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O to
+ * the device, unblocks I/O to the device, or starts the device
+ * removal handshake with the firmware with reason "remove" for
+ * SAS/SATA devices.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_sas_topology_change_list *topo_evt =
+ (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
+ int i;
+ u16 handle;
+ u8 reason_code;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+
+ for (i = 0; i < topo_evt->num_entries; i++) {
+ handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
+ if (!handle)
+ continue;
+ reason_code = topo_evt->phy_entry[i].status &
+ MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
+ scsi_tgt_priv_data = NULL;
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ switch (reason_code) {
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removed = 1;
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+ mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
+ if (scsi_tgt_priv_data) {
+ scsi_tgt_priv_data->dev_removedelay = 1;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ if (scsi_tgt_priv_data &&
+ scsi_tgt_priv_data->dev_removedelay) {
+ scsi_tgt_priv_data->dev_removedelay = 0;
+ atomic_dec_if_positive
+ (&scsi_tgt_priv_data->block_io);
+ }
+ break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ default:
+ break;
+ }
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Checks the reason code and, based on that, either blocks I/O to
+ * the device, unblocks I/O to the device, or starts the device
+ * removal handshake with the firmware with reason "remove" or
+ * "hide acknowledgment".
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 dev_handle = 0;
+ u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3_event_data_device_status_change *evtdata =
+ (struct mpi3_event_data_device_status_change *)event_reply->event_data;
+
+ if (mrioc->stop_drv_processing)
+ goto out;
+
+ dev_handle = le16_to_cpu(evtdata->dev_handle);
+
+ switch (evtdata->reason_code) {
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
+ block = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
+ delete = 1;
+ hide = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
+ delete = 1;
+ remove = 1;
+ break;
+ case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
+ case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
+ ublock = 1;
+ break;
+ default:
+ break;
+ }
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev)
+ goto out;
+ if (hide)
+ tgtdev->is_hidden = hide;
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ if (block)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ if (delete)
+ scsi_tgt_priv_data->dev_removed = 1;
+ if (ublock)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ }
+ if (remove)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_REMOVE_DEVICE);
+ if (hide)
+ mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
+ MPI3_CTRL_OP_HIDDEN_ACK);
+
+out:
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+}
+
+/**
+ * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies the new shutdown timeout value and updates it.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_energy_pack_change *evtdata =
+ (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
+ u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
+
+ if (!shutdown_timeout) {
+ ioc_warn(mrioc,
+ "%s :Invalid Shutdown Timeout received = %d\n",
+ __func__, shutdown_timeout);
+ return;
+ }
+
+ ioc_info(mrioc,
+ "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
+ __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
+ mrioc->facts.shutdown_timeout = shutdown_timeout;
+}
+
+/**
+ * mpi3mr_os_handle_events - Firmware event handler
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Identifies whether the event has to be handled and acknowledged,
+ * and either processes the event in the top half and/or schedules
+ * a bottom half through mpi3mr_fwevt_worker.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ u16 evt_type, sz;
+ struct mpi3mr_fwevt *fwevt = NULL;
+ bool ack_req = 0, process_evt_bh = 0;
+
+ if (mrioc->stop_drv_processing)
+ return;
+
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ ack_req = 1;
+
+ evt_type = event_reply->event;
+
+ switch (evt_type) {
+ case MPI3_EVENT_DEVICE_ADDED:
+ {
+ struct mpi3_device_page0 *dev_pg0 =
+ (struct mpi3_device_page0 *)event_reply->event_data;
+ if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
+ ioc_err(mrioc,
+ "%s :Failed to add device in the device add event\n",
+ __func__);
+ else
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_DEVICE_STATUS_CHANGE:
+ {
+ process_evt_bh = 1;
+ mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_sastopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ {
+ process_evt_bh = 1;
+ mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ {
+ process_evt_bh = 1;
+ break;
+ }
+ case MPI3_EVENT_ENERGY_PACK_CHANGE:
+ {
+ mpi3mr_energypackchg_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI3_EVENT_SAS_DISCOVERY:
+ case MPI3_EVENT_CABLE_MGMT:
+ case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
+ case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
+ case MPI3_EVENT_PCIE_ENUMERATION:
+ break;
+ default:
+ ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
+ __func__, evt_type);
+ break;
+ }
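+ /* Queue the event data so the bottom half can process and/or acknowledge it */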
+ if (process_evt_bh || ack_req) {
+ sz = event_reply->event_data_length * 4;
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
+ __func__, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ memcpy(fwevt->event_data, event_reply->event_data, sz);
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = evt_type;
+ fwevt->send_ack = ack_req;
+ fwevt->process_evt = process_evt_bh;
+ fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+ }
+}
+
+/**
+ * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * Identifies the protection information flags from the SCSI
+ * command and set appropriate flags in the MPI3 SCSI IO
+ * request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ u16 eedp_flags = 0;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+
+ switch (prot_op) {
+ case SCSI_PROT_NORMAL:
+ return;
+ case SCSI_PROT_READ_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ break;
+ case SCSI_PROT_READ_INSERT:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_STRIP:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_READ_PASS:
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
+ MPI3_EEDPFLAGS_CHK_REF_TAG | MPI3_EEDPFLAGS_CHK_APP_TAG |
+ MPI3_EEDPFLAGS_CHK_GUARD;
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ if (scsi_host_get_guard(scmd->device->host)
+ & SHOST_DIX_GUARD_IP) {
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN |
+ MPI3_EEDPFLAGS_CHK_APP_TAG |
+ MPI3_EEDPFLAGS_CHK_GUARD |
+ MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ scsiio_req->sgl[0].eedp.application_tag_translation_mask =
+ 0xffff;
+ } else {
+ eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
+ MPI3_EEDPFLAGS_CHK_REF_TAG |
+ MPI3_EEDPFLAGS_CHK_APP_TAG |
+ MPI3_EEDPFLAGS_CHK_GUARD;
+ }
+ scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
+ break;
+ default:
+ return;
+ }
+
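+ /* Use the IP checksum guard format when the host advertises DIX IP guard */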
+ if (scsi_host_get_guard(scmd->device->host) & SHOST_DIX_GUARD_IP)
+ eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE0:
+ eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
+ scsiio_req->cdb.eedp32.primary_reference_tag =
+ cpu_to_be32(t10_pi_ref_tag(scmd->request));
+ break;
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+ eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
+ MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE |
+ MPI3_EEDPFLAGS_CHK_GUARD;
+ scsiio_req->cdb.eedp32.primary_reference_tag =
+ cpu_to_be32(t10_pi_ref_tag(scmd->request));
+ break;
+ case SCSI_PROT_DIF_TYPE3:
+ eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD |
+ MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
+ break;
+
+ default:
+ scsiio_req->msg_flags &= ~(MPI3_SCSIIO_MSGFLAGS_METASGL_VALID);
+ return;
+ }
+
+ switch (scmd->device->sector_size) {
+ case 512:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
+ break;
+ case 520:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
+ break;
+ case 4080:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
+ break;
+ case 4088:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
+ break;
+ case 4096:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
+ break;
+ case 4104:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
+ break;
+ case 4160:
+ scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
+ break;
+ default:
+ break;
+ }
+
+ scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
+ scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
+}
+
+/**
+ * mpi3mr_build_sense_buffer - Map sense information
+ * @desc: Sense type
+ * @buf: Sense buffer to populate
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ * Maps the given sense information into either descriptor or
+ * fixed format sense data.
+ *
+ * Return: Nothing
+ */
+static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
+ u8 asc, u8 ascq)
+{
+ if (desc) {
+ buf[0] = 0x72; /* descriptor, current */
+ buf[1] = key;
+ buf[2] = asc;
+ buf[3] = ascq;
+ buf[7] = 0;
+ } else {
+ buf[0] = 0x70; /* fixed, current */
+ buf[2] = key;
+ buf[7] = 0xa;
+ buf[12] = asc;
+ buf[13] = ascq;
+ }
+}
+
+/**
+ * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
+ * @scmd: SCSI command reference
+ * @ioc_status: status of MPI3 request
+ *
+ * Maps the EEDP error status of the SCSI IO request to sense
+ * data.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
+ u16 ioc_status)
+{
+ u8 ascq = 0;
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+
+ mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x10, ascq);
+ scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+}
+
+/**
+ * mpi3mr_process_op_reply_desc - reply descriptor handler
+ * @mrioc: Adapter instance reference
+ * @reply_desc: Operational reply descriptor
+ * @reply_dma: placeholder for the reply DMA address
+ * @qidx: Operational queue index
+ *
+ * Processes the operational reply descriptor and identifies the
+ * descriptor type. Based on the descriptor, maps the MPI3 request
+ * status to a SCSI command status and calls the scsi_done
+ * callback.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
+ struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
+{
+ u16 reply_desc_type, host_tag = 0;
+ u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
+ u32 ioc_loginfo = 0;
+ struct mpi3_status_reply_descriptor *status_desc = NULL;
+ struct mpi3_address_reply_descriptor *addr_desc = NULL;
+ struct mpi3_success_reply_descriptor *success_desc = NULL;
+ struct mpi3_scsi_io_reply *scsi_reply = NULL;
+ struct scsi_cmnd *scmd = NULL;
+ struct scmd_priv *priv = NULL;
+ u8 *sense_buf = NULL;
+ u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
+ u32 xfer_count = 0, sense_count = 0, resp_data = 0;
+ u16 dev_handle = 0xFFFF;
+ struct scsi_sense_hdr sshdr;
+
+ *reply_dma = 0;
+ reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
+ MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
+ switch (reply_desc_type) {
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
+ status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(status_desc->host_tag);
+ ioc_status = le16_to_cpu(status_desc->ioc_status);
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
+ addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
+ *reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
+ scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
+ *reply_dma);
+ if (!scsi_reply) {
+ panic("%s: scsi_reply is NULL, this shouldn't happen\n",
+ mrioc->name);
+ goto out;
+ }
+ host_tag = le16_to_cpu(scsi_reply->host_tag);
+ ioc_status = le16_to_cpu(scsi_reply->ioc_status);
+ scsi_status = scsi_reply->scsi_status;
+ scsi_state = scsi_reply->scsi_state;
+ dev_handle = le16_to_cpu(scsi_reply->dev_handle);
+ sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
+ xfer_count = le32_to_cpu(scsi_reply->transfer_count);
+ sense_count = le32_to_cpu(scsi_reply->sense_count);
+ resp_data = le32_to_cpu(scsi_reply->response_data);
+ sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ if (ioc_status &
+ MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
+ ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
+ panic("%s: Ran out of sense buffers\n", mrioc->name);
+ break;
+ case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
+ success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
+ host_tag = le16_to_cpu(success_desc->host_tag);
+ break;
+ default:
+ break;
+ }
+ scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
+ if (!scmd) {
+ panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
+ mrioc->name, host_tag);
+ goto out;
+ }
+ priv = scsi_cmd_priv(scmd);
+ if (success_desc) {
+ scmd->result = DID_OK << 16;
+ goto out_success;
+ }
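+ /*
+ * A zero-byte underrun that only carries a BUSY, RESERVATION
+ * CONFLICT or TASK SET FULL SCSI status is treated as a successful
+ * transport so the status is passed to the midlayer as-is.
+ */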
+ if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
+ xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
+ scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
+ ioc_status = MPI3_IOCSTATUS_SUCCESS;
+
+ if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
+ sense_buf) {
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
+
+ memcpy(scmd->sense_buffer, sense_buf, sz);
+ }
+
+ switch (ioc_status) {
+ case MPI3_IOCSTATUS_BUSY:
+ case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_count == 0) || (scmd->underflow > xfer_count))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
+ break;
+ if (xfer_count < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
+ fallthrough;
+ case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI3_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
+ (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+ case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ mpi3mr_map_eedp_error(scmd, ioc_status);
+ break;
+ case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FUNCTION:
+ case MPI3_IOCSTATUS_INVALID_SGL:
+ case MPI3_IOCSTATUS_INTERNAL_ERROR:
+ case MPI3_IOCSTATUS_INVALID_FIELD:
+ case MPI3_IOCSTATUS_INVALID_STATE:
+ case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ }
+
+ if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
+ (scmd->cmnd[0] != ATA_16)) {
+ ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
+ scmd->result);
+ scsi_print_command(scmd);
+ ioc_info(mrioc,
+ "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
+ __func__, dev_handle, ioc_status, ioc_loginfo,
+ priv->req_q_idx + 1);
+ ioc_info(mrioc,
+ " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
+ host_tag, scsi_state, scsi_status, xfer_count, resp_data);
+ if (sense_buf) {
+ scsi_normalize_sense(sense_buf, sense_count, &sshdr);
+ ioc_info(mrioc,
+ "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
+ __func__, sense_count, sshdr.sense_key,
+ sshdr.asc, sshdr.ascq);
+ }
+ }
+out_success:
+ if (priv->meta_sg_valid) {
+ dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd), scmd->sc_data_direction);
+ }
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ scsi_dma_unmap(scmd);
+ scmd->scsi_done(scmd);
+out:
+ if (sense_buf)
+ mpi3mr_repost_sense_buf(mrioc,
+ le64_to_cpu(scsi_reply->sense_data_buffer_address));
+}
+
+/**
+ * mpi3mr_get_chain_idx - get free chain buffer index
+ * @mrioc: Adapter instance reference
+ *
+ * Try to get a free chain buffer index from the free pool.
+ *
+ * Return: -1 on failure or the free chain buffer index
+ */
+static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
+{
+ u8 retry_count = 5;
+ int cmd_idx = -1;
+
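+ /* Retry a few times in case all chain buffers are momentarily in use */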
+ do {
+ spin_lock(&mrioc->chain_buf_lock);
+ cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
+ mrioc->chain_buf_count);
+ if (cmd_idx < mrioc->chain_buf_count) {
+ set_bit(cmd_idx, mrioc->chain_bitmap);
+ spin_unlock(&mrioc->chain_buf_lock);
+ break;
+ }
+ spin_unlock(&mrioc->chain_buf_lock);
+ cmd_idx = -1;
+ } while (retry_count--);
+ return cmd_idx;
+}
+
+/**
+ * mpi3mr_prepare_sg_scmd - build scatter gather list
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function maps the SCSI command's data and protection SGEs
+ * to MPI request SGEs. If required, an additional 4K chain buffer
+ * is used to send the SGEs.
+ *
+ * Return: 0 on success, -ENOMEM on dma_map_sg failure
+ */
+static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_length;
+ int sges_left, chain_idx;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 last_chain_sgl_flags;
+ struct chain_element *chain_req;
+ struct scmd_priv *priv = NULL;
+ u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
+ MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
+
+ priv = scsi_cmd_priv(scmd);
+
+ simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI3_SGE_FLAGS_END_OF_LIST;
+ last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM;
+
+ if (meta_sg)
+ sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
+ else
+ sg_local = &scsiio_req->sgl;
+
+ if (!scsiio_req->data_length && !meta_sg) {
+ mpi3mr_build_zero_len_sge(sg_local);
+ return 0;
+ }
+
+ if (meta_sg) {
+ sg_scmd = scsi_prot_sglist(scmd);
+ sges_left = dma_map_sg(&mrioc->pdev->dev,
+ scsi_prot_sglist(scmd),
+ scsi_prot_sg_count(scmd),
+ scmd->sc_data_direction);
+ priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
+ } else {
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ }
+
+ if (sges_left < 0) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+ if (sges_left > MPI3MR_SG_DEPTH) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map returned unsupported sge count %d!\n",
+ sges_left);
+ return -ENOMEM;
+ }
+
+ sges_in_segment = (mrioc->facts.op_req_sz -
+ offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
+
+ if (scsiio_req->sgl[0].eedp.flags ==
+ MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_in_segment--;
+ /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
+ }
+
+ if (scsiio_req->msg_flags ==
+ MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
+ sges_in_segment--;
+ /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
+ }
+
+ if (meta_sg)
+ sges_in_segment = 1;
+
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ sges_in_segment--;
+ }
+
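+	/*
+	 * The remaining SGEs do not fit in the request frame; place them in
+	 * a pre-allocated chain buffer and point a LAST_CHAIN SGE at it.
+	 */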
+ chain_idx = mpi3mr_get_chain_idx(mrioc);
+ if (chain_idx < 0)
+ return -1;
+ chain_req = &mrioc->chain_sgl_list[chain_idx];
+ if (meta_sg)
+ priv->meta_chain_idx = chain_idx;
+ else
+ priv->chain_idx = chain_idx;
+
+ chain = chain_req->addr;
+ chain_dma = chain_req->dma_addr;
+ sges_in_segment = sges_left;
+ chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
+
+ mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
+ chain_length, chain_dma);
+
+ sg_local = chain;
+
+fill_in_last_segment:
+ while (sges_left > 0) {
+ if (sges_left == 1)
+ mpi3mr_add_sg_single(sg_local,
+ simple_sgl_flags_last, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += sizeof(struct mpi3_sge_common);
+ sges_left--;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ *
+ * This function calls mpi3mr_prepare_sg_scmd() to construct both
+ * data SGEs and protection information SGEs in the MPI format
+ * from the SCSI command as appropriate.
+ *
+ * Return: return value of mpi3mr_prepare_sg_scmd.
+ */
+static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
+{
+ int ret;
+
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ if (ret)
+ return ret;
+
+ if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
+ /* There is a valid meta sg */
+ scsiio_req->flags |=
+ cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
+ ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
+ }
+
+ return ret;
+}
+
+/**
+ * mpi3mr_print_response_code - print TM response as a string
+ * @mrioc: Adapter instance reference
+ * @resp_code: TM response code
+ *
+ * Print TM response code as a readable string.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
+{
+ char *desc;
+
+ switch (resp_code) {
+ case MPI3MR_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI3MR_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI3MR_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI3MR_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI3MR_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI3MR_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case MPI3MR_RSP_TM_OVERLAPPED_TAG:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ ioc_info(mrioc, "%s :response_code(0x%01x): %s\n", __func__,
+ resp_code, desc);
+}
+
+/**
+ * mpi3mr_issue_tm - Issue Task Management request
+ * @mrioc: Adapter instance reference
+ * @tm_type: Task Management type
+ * @handle: Device handle
+ * @lun: lun ID
+ * @htag: Host tag of the TM request
+ * @timeout: TM timeout value in seconds
+ * @drv_cmd: Internal command tracker
+ * @resp_code: Response code place holder
+ * @cmd_priv: SCSI command private data
+ *
+ * Issues a Task Management Request to the controller for the
+ * specified target, lun and command, waits for its completion
+ * and checks the TM response. Recovers from a TM timeout by
+ * issuing a controller reset.
+ *
+ * Return: 0 on success, non-zero on errors
+ */
+static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ u16 handle, uint lun, u16 htag, ulong timeout,
+ struct mpi3mr_drv_cmd *drv_cmd,
+ u8 *resp_code, struct scmd_priv *cmd_priv)
+{
+ struct mpi3_scsi_task_mgmt_request tm_req;
+ struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
+ int retval = 0;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct op_req_qinfo *op_req_q = NULL;
+
+ ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
+ __func__, tm_type, handle);
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
+ __func__);
+ goto out;
+ }
+
+ memset(&tm_req, 0, sizeof(tm_req));
+ mutex_lock(&drv_cmd->mutex);
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+ if (mrioc->reset_in_progress) {
+ retval = -1;
+ ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
+ mutex_unlock(&drv_cmd->mutex);
+ goto out;
+ }
+
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 1;
+ drv_cmd->callback = NULL;
+ tm_req.dev_handle = cpu_to_le16(handle);
+ tm_req.task_type = tm_type;
+ tm_req.host_tag = cpu_to_le16(htag);
+
+ int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
+ tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+ }
+ if (cmd_priv) {
+ op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
+ tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
+ tm_req.task_request_queue_id = cpu_to_le16(op_req_q->qid);
+ }
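+	/*
+	 * For PCIe (NVMe) devices, prefer the device specific abort/reset
+	 * timeouts reported by the controller over the caller supplied value.
+	 */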
+ if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
+ if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
+ timeout = tgtdev->dev_spec.pcie_inf.abort_to;
+ else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
+ timeout = tgtdev->dev_spec.pcie_inf.reset_to;
+ }
+
+ init_completion(&drv_cmd->done);
+ retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
+
+ if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "%s :Issue TM: command timed out\n", __func__);
+ drv_cmd->is_waiting = 0;
+ retval = -1;
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ goto out_unlock;
+ }
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc,
+ "%s :Issue TM: handle(0x%04x) Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ __func__, handle, drv_cmd->ioc_status,
+ drv_cmd->ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+
+ if (!tm_reply) {
+ ioc_err(mrioc, "%s :Issue TM: No TM Reply message\n", __func__);
+ retval = -1;
+ goto out_unlock;
+ }
+
+ *resp_code = le32_to_cpu(tm_reply->response_data) &
+ MPI3MR_RI_MASK_RESPCODE;
+ switch (*resp_code) {
+ case MPI3MR_RSP_TM_SUCCEEDED:
+ case MPI3MR_RSP_TM_COMPLETE:
+ break;
+ case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ ioc_info(mrioc,
+ "%s :Issue TM: Completed TM type (0x%x) handle(0x%04x) ",
+ __func__, tm_type, handle);
+ ioc_info(mrioc,
+ "with ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count));
+ mpi3mr_print_response_code(mrioc, *resp_code);
+
+out_unlock:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&drv_cmd->mutex);
+ if (scsi_tgt_priv_data)
+ atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
+ if (tgtdev)
+ mpi3mr_tgtdev_put(tgtdev);
+ if (!retval) {
+ /*
+ * Flush all IRQ handlers by calling synchronize_irq().
+ * mpi3mr_ioc_disable_intr() takes care of it.
+ */
+ mpi3mr_ioc_disable_intr(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ }
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_bios_param - BIOS param callback
+ * @sdev: SCSI device reference
+ * @bdev: Block device reference
+ * @capacity: Capacity in logical sectors
+ * @params: Parameter array
+ *
+ * Sets the BIOS parameters for the drive: heads, sectors and cylinders.
+ *
+ * Return: 0 always
+ */
+static int mpi3mr_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev, sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
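+	/* Use extended geometry (255 heads, 63 sectors) for disks of 1 GB or larger */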
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+ return 0;
+}
+
+/**
+ * mpi3mr_map_queues - Map queues callback handler
+ * @shost: SCSI host reference
+ *
+ * Calls blk_mq_pci_map_queues() with the offset of the first
+ * operational reply queue from which the mapping has to be done.
+ *
+ * Return: return value of blk_mq_pci_map_queues()
+ */
+static int mpi3mr_map_queues(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ mrioc->pdev, mrioc->op_reply_q_offset);
+}
+
+/**
+ * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
+ * @mrioc: Adapter instance reference
+ *
+ * Calculate the pending I/Os for the controller and return.
+ *
+ * Return: Number of pending I/Os
+ */
+static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
+{
+ u16 i;
+ uint pend_ios = 0;
+
+ for (i = 0; i < mrioc->num_op_reply_q; i++)
+ pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
+ return pend_ios;
+}
+
+/**
+ * mpi3mr_print_pending_host_io - print pending I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Prints the number of pending I/Os and the details of each I/O
+ * prior to reset, for debug purposes.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+
+ ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
+ __func__, mpi3mr_get_fw_pending_ios(mrioc));
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_print_scmd, (void *)mrioc);
+}
+
+/**
+ * mpi3mr_wait_for_host_io - block for I/Os to complete
+ * @mrioc: Adapter instance reference
+ * @timeout: Timeout in seconds
+ *
+ * Waits for pending I/Os for the given adapter to complete or
+ * to hit the timeout.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
+{
+ enum mpi3mr_iocstate iocstate;
+ int i = 0;
+
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ return;
+
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ return;
+ ioc_info(mrioc,
+ "%s :Waiting for %d seconds prior to reset for %d I/O\n",
+ __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
+
+ for (i = 0; i < timeout; i++) {
+ if (!mpi3mr_get_fw_pending_ios(mrioc))
+ break;
+ iocstate = mpi3mr_get_iocstate(mrioc);
+ if (iocstate != MRIOC_STATE_READY)
+ break;
+ msleep(1000);
+ }
+
+ ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
+ mpi3mr_get_fw_pending_ios(mrioc));
+}
+
+/**
+ * mpi3mr_eh_host_reset - Host reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a controller reset if the scmd is for a physical device.
+ * If the scmd is for a RAID volume, waits for
+ * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checks whether any I/Os
+ * are still pending prior to issuing the reset to the controller.
+ *
+ * Return: SUCCESS on successful reset, else FAILED
+ */
+static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
+ int retval = FAILED, ret;
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_type = stgt_priv_data->dev_type;
+ }
+
+ if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
+ mpi3mr_wait_for_host_io(mrioc,
+ MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
+ if (!mpi3mr_get_fw_pending_ios(mrioc)) {
+ retval = SUCCESS;
+ goto out;
+ }
+ }
+
+ mpi3mr_print_pending_host_io(mrioc);
+ ret = mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EH_HOS, 1);
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Host reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_target_reset - Target reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a target reset Task Management request, verifies the
+ * scmd is terminated successfully and returns the status
+ * accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd, else
+ * FAILED
+ */
+static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Target Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ sdev_printk(KERN_INFO, scmd->device,
+ "Target Reset is issued to handle(0x%04x)\n",
+ dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Target reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_eh_dev_reset - Device reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Issues a lun reset Task Management request, verifies the scmd
+ * is terminated successfully and returns the status accordingly.
+ *
+ * Return: SUCCESS on successful termination of the scmd, else
+ * FAILED
+ */
+static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ u16 dev_handle;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "SCSI device is not available\n");
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+ sdev_printk(KERN_INFO, scmd->device,
+ "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
+
+ ret = mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
+ sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+
+ if (ret)
+ goto out;
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "Device(lun) reset is %s for scmd(%p)\n",
+ ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_scan_start - Scan start callback handler
+ * @shost: SCSI host reference
+ *
+ * Issue port enable request asynchronously.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_scan_start(struct Scsi_Host *shost)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ mrioc->scan_started = 1;
+ ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
+ if (mpi3mr_issue_port_enable(mrioc, 1)) {
+ ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
+ mrioc->scan_started = 0;
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ }
+}
+
+/**
+ * mpi3mr_scan_finished - Scan finished callback handler
+ * @shost: SCSI host reference
+ * @time: Jiffies from the scan start
+ *
+ * Checks whether the port enable is completed, timed out or
+ * failed, and sets the scan status accordingly after taking any
+ * required recovery action.
+ *
+ * Return: 1 on scan finished or timed out, 0 for in progress
+ */
+static int mpi3mr_scan_finished(struct Scsi_Host *shost,
+ unsigned long time)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+
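+	/*
+	 * If port enable has not completed within the timeout, give up on
+	 * the request and recover the controller with a soft reset.
+	 */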
+ if (time >= (pe_timeout * HZ)) {
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ ioc_err(mrioc, "%s :port enable request timed out\n", __func__);
+ mrioc->is_driver_loading = 0;
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT, 1);
+ }
+
+ if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "%s :port enable failed with (ioc_status=0x%08x)\n",
+ __func__, mrioc->scan_failed);
+ mrioc->is_driver_loading = 0;
+ mrioc->stop_drv_processing = 1;
+ return 1;
+ }
+
+ if (mrioc->scan_started)
+ return 0;
+ ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__);
+ mpi3mr_start_watchdog(mrioc);
+ mrioc->is_driver_loading = 0;
+
+ return 1;
+}
+
+/**
+ * mpi3mr_slave_destroy - Slave destroy callback handler
+ * @sdev: SCSI device reference
+ *
+ * Cleanup and free per device(lun) private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_slave_destroy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ struct scsi_target *starget;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ scsi_tgt_priv_data->num_luns--;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
+ tgt_dev->starget = NULL;
+ if (tgt_dev)
+ mpi3mr_tgtdev_put(tgt_dev);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_target_destroy - Target destroy callback handler
+ * @starget: SCSI target reference
+ *
+ * Cleanup and free per target private data.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+
+ if (!starget->hostdata)
+ return;
+
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
+ if (tgt_dev && (tgt_dev->starget == starget) &&
+ (tgt_dev->perst_id == starget->id))
+ tgt_dev->starget = NULL;
+ if (tgt_dev) {
+ scsi_tgt_priv_data->tgt_dev = NULL;
+ scsi_tgt_priv_data->perst_id = 0;
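+		/*
+		 * Drop both the reference taken by the lookup above and the
+		 * reference held via tgt_priv_data since target_alloc.
+		 */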
+ mpi3mr_tgtdev_put(tgt_dev);
+ mpi3mr_tgtdev_put(tgt_dev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ kfree(starget->hostdata);
+ starget->hostdata = NULL;
+}
+
+/**
+ * mpi3mr_slave_configure - Slave configure callback handler
+ * @sdev: SCSI device reference
+ *
+ * Configures queue depth, max hardware sectors and virt boundary
+ * as required.
+ *
+ * Return: 0 always.
+ */
+static int mpi3mr_slave_configure(struct scsi_device *sdev)
+{
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ int retval = 0;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (!tgt_dev)
+ return -ENXIO;
+
+ mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
+ switch (tgt_dev->dev_type) {
+ case MPI3_DEVICE_DEVFORM_PCIE:
+		/* The block layer hw sector size = 512 */
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgt_dev->dev_spec.pcie_inf.mdts / 512);
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ break;
+ default:
+ break;
+ }
+
+ mpi3mr_tgtdev_put(tgt_dev);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_slave_alloc - Slave alloc callback handler
+ * @sdev: SCSI device reference
+ *
+ * Allocate per device(lun) private data and initialize it.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct mpi3mr_ioc *mrioc;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
+ unsigned long flags;
+ struct scsi_target *starget;
+ int retval = 0;
+
+ starget = scsi_target(sdev);
+ shost = dev_to_shost(&starget->dev);
+ mrioc = shost_priv(shost);
+ scsi_tgt_priv_data = starget->hostdata;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+
+ if (tgt_dev) {
+ if (tgt_dev->starget == NULL)
+ tgt_dev->starget = starget;
+ mpi3mr_tgtdev_put(tgt_dev);
+ retval = 0;
+ } else {
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ return -ENXIO;
+ }
+
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
+ if (!scsi_dev_priv_data)
+ return -ENOMEM;
+
+ scsi_dev_priv_data->lun_id = sdev->lun;
+ scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
+ sdev->hostdata = scsi_dev_priv_data;
+
+ scsi_tgt_priv_data->num_luns++;
+
+ return retval;
+}
+
+/**
+ * mpi3mr_target_alloc - Target alloc callback handler
+ * @starget: SCSI target reference
+ *
+ * Allocate per target private data and initialize it.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure.
+ */
+static int mpi3mr_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgt_dev;
+ unsigned long flags;
+ int retval = 0;
+
+ scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
+ if (!scsi_tgt_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = scsi_tgt_priv_data;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && !tgt_dev->is_hidden) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ } else
+ retval = -ENXIO;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return retval;
+}
+
+/**
+ * mpi3mr_check_return_unmap - Whether an unmap is allowed
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI Command reference
+ *
+ * The controller hardware cannot handle certain UNMAP commands
+ * for NVMe drives. This routine checks for such commands and, if
+ * one is found, completes the SCSI command with the proper status
+ * and sense data and returns true.
+ *
+ * Return: TRUE for a disallowed UNMAP, FALSE otherwise.
+ */
+static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char *buf;
+ u16 param_len, desc_len;
+
+ param_len = get_unaligned_be16(scmd->cmnd + 7);
+
+ if (!param_len) {
+ ioc_warn(mrioc,
+ "%s: cdb received with zero parameter length\n",
+ __func__);
+ scsi_print_command(scmd);
+ scmd->result = DID_OK << 16;
+ scmd->scsi_done(scmd);
+ return true;
+ }
+
+ if (param_len < 24) {
+ ioc_warn(mrioc,
+ "%s: cdb received with invalid param_len: %d\n",
+ __func__, param_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scmd->scsi_done(scmd);
+ return true;
+ }
+ if (param_len != scsi_bufflen(scmd)) {
+ ioc_warn(mrioc,
+ "%s: cdb received with param_len: %d bufflen: %d\n",
+ __func__, param_len, scsi_bufflen(scmd));
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x1A, 0);
+ scmd->scsi_done(scmd);
+ return true;
+ }
+ buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
+ if (!buf) {
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x55, 0x03);
+ scmd->scsi_done(scmd);
+ return true;
+ }
+ scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
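+	/* Bytes 2-3 of the UNMAP parameter list carry the block descriptor data length */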
+ desc_len = get_unaligned_be16(&buf[2]);
+
+ if (desc_len < 16) {
+ ioc_warn(mrioc,
+ "%s: Invalid descriptor length in param list: %d\n",
+ __func__, desc_len);
+ scsi_print_command(scmd);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x26, 0);
+ scmd->scsi_done(scmd);
+ kfree(buf);
+ return true;
+ }
+
+ if (param_len > (desc_len + 8)) {
+ scsi_print_command(scmd);
+ ioc_warn(mrioc,
+ "%s: Truncating param_len(%d) to desc_len+8(%d)\n",
+ __func__, param_len, (desc_len + 8));
+ param_len = desc_len + 8;
+ put_unaligned_be16(param_len, scmd->cmnd + 7);
+ scsi_print_command(scmd);
+ }
+
+ kfree(buf);
+ return false;
+}
+
+/**
+ * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
+ * @scmd: SCSI Command reference
+ *
+ * Checks whether a cdb is allowed during shutdown or not.
+ *
+ * Return: TRUE for allowed commands, FALSE otherwise.
+ */
+inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ case START_STOP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * mpi3mr_qcmd - I/O request dispatcher
+ * @shost: SCSI Host reference
+ * @scmd: SCSI Command reference
+ *
+ * Issues the SCSI Command as an MPI3 request.
+ *
+ * Return: 0 on successful queueing of the request or if the
+ * request is completed with failure.
+ * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
+ * SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
+ */
+static int mpi3mr_qcmd(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *scmd_priv_data = NULL;
+ struct mpi3_scsi_io_request *scsiio_req = NULL;
+ struct op_req_qinfo *op_req_q = NULL;
+ int retval = 0;
+ u16 dev_handle;
+ u16 host_tag;
+ u32 scsiio_flags = 0;
+ struct request *rq = scmd->request;
+ int iprio_class;
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->stop_drv_processing &&
+ !(mpi3mr_allow_scmd_to_fw(scmd))) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (mrioc->reset_in_progress) {
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+
+ dev_handle = stgt_priv_data->dev_handle;
+ if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+ if (stgt_priv_data->dev_removed) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (atomic_read(&stgt_priv_data->block_io)) {
+ if (mrioc->stop_drv_processing) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+ retval = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
+ if ((scmd->cmnd[0] == UNMAP) &&
+ (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ mpi3mr_check_return_unmap(mrioc, scmd))
+ goto out;
+
+ host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
+ if (host_tag == MPI3MR_HOSTTAG_INVALID) {
+ scmd->result = DID_ERROR << 16;
+ scmd->scsi_done(scmd);
+ goto out;
+ }
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
+ else
+ scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
+
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
+
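+	/* Elevate the command priority for RT class I/O when NCQ priority is enabled */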
+ if (sdev_priv_data->ncq_prio_enable) {
+ iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
+ if (iprio_class == IOPRIO_CLASS_RT)
+ scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
+ }
+
+ if (scmd->cmd_len > 16)
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
+
+ scmd_priv_data = scsi_cmd_priv(scmd);
+ memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
+ scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
+ scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
+ scsiio_req->host_tag = cpu_to_le16(host_tag);
+
+ mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
+
+ memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
+ scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
+ scsiio_req->dev_handle = cpu_to_le16(dev_handle);
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
+ int_to_scsilun(sdev_priv_data->lun_id,
+ (struct scsi_lun *)scsiio_req->lun);
+
+ if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+ op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
+
+ if (mpi3mr_op_request_post(mrioc, op_req_q,
+ scmd_priv_data->mpi3mr_scsiio_req)) {
+ mpi3mr_clear_scmd_priv(mrioc, scmd);
+ retval = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+out:
+ return retval;
+}
+
+static struct scsi_host_template mpi3mr_driver_template = {
+ .module = THIS_MODULE,
+ .name = "MPI3 Storage Controller",
+ .proc_name = MPI3MR_DRIVER_NAME,
+ .queuecommand = mpi3mr_qcmd,
+ .target_alloc = mpi3mr_target_alloc,
+ .slave_alloc = mpi3mr_slave_alloc,
+ .slave_configure = mpi3mr_slave_configure,
+ .target_destroy = mpi3mr_target_destroy,
+ .slave_destroy = mpi3mr_slave_destroy,
+ .scan_finished = mpi3mr_scan_finished,
+ .scan_start = mpi3mr_scan_start,
+ .change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_device_reset_handler = mpi3mr_eh_dev_reset,
+ .eh_target_reset_handler = mpi3mr_eh_target_reset,
+ .eh_host_reset_handler = mpi3mr_eh_host_reset,
+ .bios_param = mpi3mr_bios_param,
+ .map_queues = mpi3mr_map_queues,
+ .no_write_same = 1,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPI3MR_SG_DEPTH,
+	/* max xfer supported is 1M (2K in 512 byte sized sectors) */
+ .max_sectors = 2048,
+ .cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct scmd_priv),
+};
+
+/**
+ * mpi3mr_init_drv_cmd - Initialize internal command tracker
+ * @cmdptr: Internal command tracker
+ * @host_tag: Host tag used for the specific command
+ *
+ * Initialize the internal command tracker structure with
+ * specified host tag.
+ *
+ * Return: Nothing.
+ */
+static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
+ u16 host_tag)
+{
+ mutex_init(&cmdptr->mutex);
+ cmdptr->reply = NULL;
+ cmdptr->state = MPI3MR_CMD_NOTUSED;
+ cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ cmdptr->host_tag = host_tag;
+}
+
+/**
+ * osintfc_mrioc_security_status - Check controller secure status
+ * @pdev: PCI device instance
+ *
+ * Read the Device Serial Number capability from PCI config
+ * space and decide whether the controller is secure or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+osintfc_mrioc_security_status(struct pci_dev *pdev)
+{
+ u32 cap_data;
+ int base;
+ u32 ctlr_status;
+ u32 debug_status;
+ int retval = 0;
+
+ base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ if (!base) {
+ dev_err(&pdev->dev,
+ "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
+ return -1;
+ }
+
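+	/*
+	 * The security and secure debug status bits are in the first dword
+	 * of the DSN capability body (capability header + 4).
+	 */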
+ pci_read_config_dword(pdev, base + 4, &cap_data);
+
+ debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
+ ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
+
+ switch (ctlr_status) {
+ case MPI3MR_INVALID_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ case MPI3MR_CONFIG_SECURE_DEVICE:
+ if (!debug_status)
+ dev_info(&pdev->dev,
+ "%s: Config secure ctlr is detected\n",
+ __func__);
+ break;
+ case MPI3MR_HARD_SECURE_DEVICE:
+ break;
+ case MPI3MR_TAMPERED_DEVICE:
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ break;
+ default:
+ retval = -1;
+ break;
+ }
+
+ if (!retval && debug_status) {
+ dev_err(&pdev->dev,
+ "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
+ __func__, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ retval = -1;
+ }
+
+ return retval;
+}
+
+/**
+ * mpi3mr_probe - PCI probe callback
+ * @pdev: PCI device instance
+ * @id: PCI device ID details
+ *
+ * Controller initialization routine. Checks the security status
+ * of the controller and, if it is invalid or tampered, returns
+ * from the probe without initializing the controller. Otherwise,
+ * allocates the per adapter instance through shost_priv,
+ * initializes controller specific data structures, initializes
+ * the controller hardware and adds the shost to the SCSI
+ * subsystem.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mpi3mr_ioc *mrioc = NULL;
+ struct Scsi_Host *shost = NULL;
+ int retval = 0, i;
+
+ if (osintfc_mrioc_security_status(pdev)) {
+ warn_non_secure_ctlr = 1;
+ return 1; /* For Invalid and Tampered device */
+ }
+
+ shost = scsi_host_alloc(&mpi3mr_driver_template,
+ sizeof(struct mpi3mr_ioc));
+ if (!shost) {
+ retval = -ENODEV;
+ goto shost_failed;
+ }
+
+ mrioc = shost_priv(shost);
+ mrioc->id = mrioc_ids++;
+ sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
+ sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
+ INIT_LIST_HEAD(&mrioc->list);
+ spin_lock(&mrioc_list_lock);
+ list_add_tail(&mrioc->list, &mrioc_list);
+ spin_unlock(&mrioc_list_lock);
+
+ spin_lock_init(&mrioc->admin_req_lock);
+ spin_lock_init(&mrioc->reply_free_queue_lock);
+ spin_lock_init(&mrioc->sbq_lock);
+ spin_lock_init(&mrioc->fwevt_lock);
+ spin_lock_init(&mrioc->tgtdev_lock);
+ spin_lock_init(&mrioc->watchdog_lock);
+ spin_lock_init(&mrioc->chain_buf_lock);
+
+ INIT_LIST_HEAD(&mrioc->fwevt_list);
+ INIT_LIST_HEAD(&mrioc->tgtdev_list);
+ INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
+
+ mutex_init(&mrioc->reset_mutex);
+ mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
+ mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
+
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
+ MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
+
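+	/*
+	 * Segmented operational queues are enabled for controllers with a
+	 * non-zero PCI revision.
+	 */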
+ if (pdev->revision)
+ mrioc->enable_segqueue = true;
+
+ init_waitqueue_head(&mrioc->reset_waitq);
+ mrioc->logging_level = logging_level;
+ mrioc->shost = shost;
+ mrioc->pdev = pdev;
+
+ /* init shost parameters */
+ shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
+ shost->max_lun = -1;
+ shost->unique_id = mrioc->id;
+
+ shost->max_channel = 1;
+ shost->max_id = 0xFFFFFFFF;
+
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else {
+ prot_mask = SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION;
+ scsi_host_set_prot(shost, prot_mask);
+ }
+
+ ioc_info(mrioc,
+ "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
+ __func__,
+ (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
+ (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
+ (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
+ (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
+ (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
+ (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
+ (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
+
+ if (prot_guard_mask)
+ scsi_host_set_guard(shost, (prot_guard_mask & 3));
+ else
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
+ "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
+ mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
+ mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
+ if (!mrioc->fwevt_worker_thread) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_fwevtthread_failed;
+ }
+
+ mrioc->is_driver_loading = 1;
+ if (mpi3mr_init_ioc(mrioc, 0)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_iocinit_failed;
+ }
+
+ shost->nr_hw_queues = mrioc->num_op_reply_q;
+ shost->can_queue = mrioc->max_host_ios;
+ shost->sg_tablesize = MPI3MR_SG_DEPTH;
+ shost->max_id = mrioc->facts.max_perids;
+
+ retval = scsi_add_host(shost, &pdev->dev);
+ if (retval) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto addhost_failed;
+ }
+
+ scsi_scan_host(shost);
+ return retval;
+
+addhost_failed:
+ mpi3mr_cleanup_ioc(mrioc, 0);
+out_iocinit_failed:
+ destroy_workqueue(mrioc->fwevt_worker_thread);
+out_fwevtthread_failed:
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+ scsi_host_put(shost);
+shost_failed:
+ return retval;
+}
+
+/**
+ * mpi3mr_remove - PCI remove callback
+ * @pdev: PCI device instance
+ *
+ * Free up all memory and resources associated with the
+ * controller and target devices, and unregister the shost.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+ scsi_remove_host(shost);
+
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ mpi3mr_cleanup_ioc(mrioc, 0);
+
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+
+ scsi_host_put(shost);
+}
+
+/**
+ * mpi3mr_shutdown - PCI shutdown callback
+ * @pdev: PCI device instance
+ *
+ * Free up all memory and resources associated with the
+ * controller.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ if (!shost)
+ return;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ spin_lock_irqsave(&mrioc->fwevt_lock, flags);
+ wq = mrioc->fwevt_worker_thread;
+ mrioc->fwevt_worker_thread = NULL;
+ spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+ mpi3mr_cleanup_ioc(mrioc, 0);
+}
+
+#ifdef CONFIG_PM
+/**
+ * mpi3mr_suspend - PCI power management suspend callback
+ * @pdev: PCI device instance
+ * @state: New power state
+ *
+ * Changes the power state to the given value and cleans up the
+ * IOC by issuing a MUR and a shutdown notification.
+ *
+ * Return: 0 always.
+ */
+static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ pci_power_t device_state;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+ while (mrioc->reset_in_progress || mrioc->is_driver_loading)
+ ssleep(1);
+ mrioc->stop_drv_processing = 1;
+ mpi3mr_cleanup_fwevt_list(mrioc);
+ scsi_block_requests(shost);
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc, 1);
+
+ device_state = pci_choose_state(pdev, state);
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, device_state);
+ mpi3mr_cleanup_resources(mrioc);
+
+ return 0;
+}
+
+/**
+ * mpi3mr_resume - PCI power management resume callback
+ * @pdev: PCI device instance
+ *
+ * Restores the power state to D0, reinitializes the controller
+ * and resumes I/O operations to the target devices.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int mpi3mr_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct mpi3mr_ioc *mrioc;
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ if (!shost)
+ return 0;
+
+ mrioc = shost_priv(shost);
+
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ mrioc->pdev = pdev;
+ mrioc->cpu_count = num_online_cpus();
+ r = mpi3mr_setup_resources(mrioc);
+ if (r) {
+ ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
+ __func__, r);
+ return r;
+ }
+
+ mrioc->stop_drv_processing = 0;
+ mpi3mr_init_ioc(mrioc, 1);
+ scsi_unblock_requests(shost);
+ mpi3mr_start_watchdog(mrioc);
+
+ return 0;
+}
+#endif
+
+static const struct pci_device_id mpi3mr_pci_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5,
+ PCI_ANY_ID, PCI_ANY_ID)
+ },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+
+static struct pci_driver mpi3mr_pci_driver = {
+ .name = MPI3MR_DRIVER_NAME,
+ .id_table = mpi3mr_pci_id_table,
+ .probe = mpi3mr_probe,
+ .remove = mpi3mr_remove,
+ .shutdown = mpi3mr_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mpi3mr_suspend,
+ .resume = mpi3mr_resume,
+#endif
+};
+
+static int __init mpi3mr_init(void)
+{
+ int ret_val;
+
+ pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ ret_val = pci_register_driver(&mpi3mr_pci_driver);
+
+ return ret_val;
+}
+
+static void __exit mpi3mr_exit(void)
+{
+ if (warn_non_secure_ctlr)
+ pr_warn(
+ "Unloading %s version %s while managing a non secure controller\n",
+ MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
+ else
+ pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
+ MPI3MR_DRIVER_VERSION);
+
+ pci_unregister_driver(&mpi3mr_pci_driver);
+}
+
+module_init(mpi3mr_init);
+module_exit(mpi3mr_exit);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5779f313f6f8..c39955239d1c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -141,7 +141,7 @@ _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
* @mpi_request:mf request pointer.
* @sz: size of buffer.
*
- * @Returns - 1/0 Reset to be done or Not
+ * Return: 1/0 Reset to be done or Not
*/
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
@@ -440,7 +440,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
return;
/* From smid we can get scsi_cmd, once we have sg_scmd,
- * we just need to get sg_virt and sg_next to get virual
+ * we just need to get sg_virt and sg_next to get virtual
* address associated with sgel->Address.
*/
@@ -600,7 +600,7 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
* _base_sync_drv_fw_timestamp - Sync Drive-Fw TimeStamp.
* @ioc: Per Adapter Object
*
- * Return nothing.
+ * Return: nothing.
*/
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
@@ -704,7 +704,7 @@ _base_fault_reset_work(struct work_struct *work)
/*
* Call _scsih_flush_pending_cmds callback so that we flush all
- * pending commands back to OS. This call is required to aovid
+ * pending commands back to OS. This call is required to avoid
* deadlock at block layer. Dead IOC will fail to do diag reset,
* and this call is safe since dead ioc will never return any
* command back from HW.
@@ -873,7 +873,7 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
* @ioc: per adapter object
* @fault_code: fault code
*
- * Return nothing.
+ * Return: nothing.
*/
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
@@ -887,7 +887,7 @@ mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
* @ioc: per adapter object
* @caller: caller function name
*
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
@@ -1359,11 +1359,11 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
}
/**
- * _base_display_reply_info -
+ * _base_display_reply_info - handle reply descriptors depending on IOC Status
* @ioc: per adapter object
* @smid: system request message index
* @msix_index: MSIX table index supplied by the OS
- * @reply: reply message frame(lower 32bit addr)
+ * @reply: reply message frame (lower 32bit addr)
*/
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1804,7 +1804,7 @@ _base_interrupt(int irq, void *bus_id)
* @irqpoll: irq_poll object
* @budget: irq poll weight
*
- * returns number of reply descriptors processed
+ * Return: number of reply descriptors processed
*/
static int
_base_irqpoll(struct irq_poll *irqpoll, int budget)
@@ -1826,7 +1826,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
enable_irq(reply_q->os_irq);
/*
* Go for one more round of processing the
- * reply descriptor post queue incase if HBA
+ * reply descriptor post queue in case the HBA
* Firmware has posted some reply descriptors
* while reenabling the IRQ.
*/
@@ -1840,7 +1840,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
* _base_init_irqpolls - initliaze IRQ polls
* @ioc: per adapter object
*
- * returns nothing
+ * Return: nothing
*/
static void
_base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
@@ -1878,7 +1878,7 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
* @ioc: per adapter object
* @poll: poll over reply descriptor pools incase interrupt for
* timed-out SCSI command got delayed
- * Context: non ISR conext
+ * Context: non-ISR context
*
* Called when a Task Management request has completed.
*/
@@ -2104,7 +2104,16 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
/**
* _base_build_nvme_prp - This function is called for NVMe end devices to build
- * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
+ * a native SGL (NVMe PRP).
+ * @ioc: per adapter object
+ * @smid: system request message index for getting associated SGL
+ * @nvme_encap_request: the NVMe request msg frame pointer
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * The native SGL is built starting in the first PRP
* entry of the NVMe message (PRP1). If the data buffer is small enough to be
* described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
* used to describe a larger data buffer. If the data buffer is too large to
@@ -2133,7 +2142,7 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* Each 64-bit PRP entry comprises an address and an offset field. The address
* always points at the beginning of a 4KB physical memory page, and the offset
* describes where within that 4KB page the memory segment begins. Only the
- * first element in a PRP list may contain a non-zero offest, implying that all
+ * first element in a PRP list may contain a non-zero offset, implying that all
* memory segments following the first begin at the start of a 4KB page.
*
* Each PRP element normally describes 4KB of physical memory, with exceptions
@@ -2147,14 +2156,6 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
* Since PRP entries lack any indication of size, the overall data buffer length
* is used to determine where the end of the data memory buffer is located, and
* how many PRP entries are required to describe it.
- *
- * @ioc: per adapter object
- * @smid: system request message index for getting asscociated SGL
- * @nvme_encap_request: the NVMe request msg frame pointer
- * @data_out_dma: physical address for WRITES
- * @data_out_sz: data xfer size for WRITES
- * @data_in_dma: physical address for READS
- * @data_in_sz: data xfer size for READS
*/
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -2311,8 +2312,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
- * base_make_prp_nvme -
- * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
+ * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
+ * SGLs specific to NVMe drives only
*
* @ioc: per adapter object
* @scmd: SCSI command from the mid-layer
@@ -3155,7 +3156,7 @@ fall_back:
* - loaded driver with default max_msix_vectors module parameter and
* - system booted in non kdump mode
*
- * returns nothing.
+ * Return: nothing.
*/
static void
_base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
@@ -3364,14 +3365,14 @@ static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
/**
- * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
+ * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
* and if it is in fault state then issue diag reset.
* @ioc: per adapter object
*
- * Returns: 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
*/
-static int
-_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
+int
+mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
int rc = -EFAULT;
@@ -3385,12 +3386,14 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt3sas_print_fault_code(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_mask_interrupts(ioc);
rc = _base_diag_reset(ioc);
} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
MPI2_IOC_STATE_COREDUMP) {
mpt3sas_print_coredump_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ mpt3sas_base_mask_interrupts(ioc);
rc = _base_diag_reset(ioc);
}
@@ -3472,7 +3475,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
r = _base_get_ioc_facts(ioc);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_ioc_facts(ioc)))
goto out_fail;
}
@@ -3633,7 +3636,7 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
* @ioc: per adapter object
* @scmd: scsi_cmnd object
*
- * returns msix index of general reply queues,
+ * Return: msix index of general reply queues,
* i.e. reply queue on which IO request's reply
* should be posted by the HBA firmware.
*/
@@ -3663,7 +3666,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @scmd: scsi_cmnd object
*
- * Returns: msix index of high iops reply queues.
+ * Return: msix index of high iops reply queues.
* i.e. high iops reply queue on which IO request's
* reply should be posted by the HBA firmware.
*/
@@ -3910,7 +3913,7 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
* @ioc: per adapter object
* @smid: system request message index
*
- * returns msix index.
+ * Return: msix index.
*/
static u8
_base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4005,7 +4008,7 @@ _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* _base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
*/
static void
_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4109,7 +4112,7 @@ _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
* @smid: system request message index
* @handle: device handle, unused in this function, for function type match
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4131,7 +4134,7 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
* @handle: device handle, unused in this function, for function type match
- * Return nothing
+ * Return: nothing
*/
static void
_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4152,9 +4155,9 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* firmware using Atomic Request Descriptor
* @ioc: per adapter object
* @smid: system request message index
- * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
@@ -4176,7 +4179,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
* @ioc: per adapter object
* @smid: system request message index
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
@@ -4434,6 +4437,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
ioc->pdev->subsystem_device);
break;
}
+ break;
default:
break;
}
@@ -4453,7 +4457,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
Mpi26ComponentImageHeader_t *cmp_img_hdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
- int r = 0;
+ int r = 0, issue_diag_reset = 0;
u32 package_version = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
@@ -4503,7 +4507,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi25FWUploadRequest_t)/4);
- r = -ETIME;
+ issue_diag_reset = 1;
} else {
memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
@@ -4543,11 +4547,18 @@ out:
if (fwpkg_data)
dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
fwpkg_data_dma);
+ if (issue_diag_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
+ return -EFAULT;
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
return r;
}
/**
- * _base_display_ioc_capabilities - Disply IOC's capabilities.
+ * _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*/
static void
@@ -4750,15 +4761,19 @@ out:
* according to performance mode.
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: zero on success; otherwise return EAGAIN error code asking the
+ * caller to retry.
*/
-static void
+static int
_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2IOCPage1_t ioc_pg1;
Mpi2ConfigReply_t mpi_reply;
+ int rc;
- mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+ rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
+ if (rc)
+ return rc;
memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
switch (perf_mode) {
@@ -4780,9 +4795,11 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
*/
ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
ioc_info(ioc, "performance mode: balanced\n");
- return;
+ return 0;
}
fallthrough;
case MPT_PERF_MODE_LATENCY:
@@ -4793,7 +4810,9 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
ioc_pg1.ProductSpecific = 0;
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
ioc_info(ioc, "performance mode: latency\n");
break;
case MPT_PERF_MODE_IOPS:
@@ -4805,9 +4824,12 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
le32_to_cpu(ioc_pg1.CoalescingTimeout));
ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
ioc_pg1.ProductSpecific = 0;
- mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
+ if (rc)
+ return rc;
break;
}
+ return 0;
}
/**
@@ -4815,9 +4837,9 @@ _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage2_t trigger_pg2;
@@ -4831,7 +4853,7 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
&trigger_pg2);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4840,7 +4862,7 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
@@ -4859,6 +4881,7 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
mpi_event_tg++;
}
}
+ return 0;
}
/**
@@ -4866,9 +4889,9 @@ _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage3_t trigger_pg3;
@@ -4882,7 +4905,7 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
&trigger_pg3);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4891,7 +4914,7 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
@@ -4910,6 +4933,7 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
mpi_scsi_tg++;
}
}
+ return 0;
}
/**
@@ -4917,9 +4941,9 @@ _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage4_t trigger_pg4;
@@ -4933,7 +4957,7 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
&trigger_pg4);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4942,7 +4966,7 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
@@ -4963,6 +4987,7 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
mpi_status_tg++;
}
}
+ return 0;
}
/**
@@ -4970,9 +4995,9 @@ _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* persistent pages
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: 0 on success; otherwise return failure status.
*/
-static void
+static int
_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
Mpi26DriverTriggerPage1_t trigger_pg1;
@@ -4983,7 +5008,7 @@ _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
&trigger_pg1);
if (r)
- return;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
@@ -4992,25 +5017,30 @@ _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc,
"%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
__func__, ioc_status));
- return;
+ return 0;
}
if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
ioc->diag_trigger_master.MasterData |=
le32_to_cpu(
trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
+ return 0;
}
/**
* _base_check_for_trigger_pages_support - checks whether HBA FW supports
* driver trigger pages or not
* @ioc : per adapter object
+ * @trigger_flags : address where trigger page0's TriggerFlags value is copied
+ *
+ * Return: trigger flags mask if HBA FW supports driver trigger pages;
+ * otherwise returns %-EFAULT if driver trigger pages are not supported by
+ * the FW, or %-EAGAIN if a diag reset occurred due to an FW fault and the
+ * caller should retry the command.
*
- * Returns trigger flags mask if HBA FW supports driver trigger pages,
- * otherwise returns EFAULT.
*/
static int
-_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
+_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
{
Mpi26DriverTriggerPage0_t trigger_pg0;
int r = 0;
@@ -5020,14 +5050,15 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
&trigger_pg0);
if (r)
- return -EFAULT;
+ return r;
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
return -EFAULT;
- return le16_to_cpu(trigger_pg0.TriggerFlags);
+ *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
+ return 0;
}
/**
@@ -5035,12 +5066,14 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
* persistent pages.
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: zero on success; otherwise return the -EAGAIN error code
+ * asking the caller to retry.
*/
-static void
+static int
_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
int trigger_flags;
+ int r;
/*
* Default setting of master trigger.
@@ -5048,9 +5081,16 @@ _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
- trigger_flags = _base_check_for_trigger_pages_support(ioc);
- if (trigger_flags < 0)
- return;
+ r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
+ if (r) {
+ if (r == -EAGAIN)
+ return r;
+ /*
+ * Don't go for error handling when FW doesn't support
+ * driver trigger pages.
+ */
+ return 0;
+ }
ioc->supports_trigger_pages = 1;
@@ -5059,40 +5099,53 @@ _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
* if master trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID)
- _base_get_master_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
+ r = _base_get_master_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
/*
* Retrieve event diag trigger values from driver trigger pg2
* if event trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID)
- _base_get_event_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
+ r = _base_get_event_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
/*
* Retrieve scsi diag trigger values from driver trigger pg3
* if scsi trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID)
- _base_get_scsi_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
+ r = _base_get_scsi_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
/*
* Retrieve mpi error diag trigger values from driver trigger pg4
* if loginfo trigger bit enabled in TriggerFlags.
*/
if ((u16)trigger_flags &
- MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID)
- _base_get_mpi_diag_triggers(ioc);
+ MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
+ r = _base_get_mpi_diag_triggers(ioc);
+ if (r)
+ return r;
+ }
+ return 0;
}
/**
* _base_update_diag_trigger_pages - Update the driver trigger pages after
- * online FW update, incase updated FW supports driver
+ * online FW update, in case updated FW supports driver
* trigger pages.
* @ioc : per adapter object
*
- * Return nothing.
+ * Return: nothing.
*/
static void
_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
@@ -5119,23 +5172,33 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
-static void
+static int
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ConfigReply_t mpi_reply;
u32 iounit_pg1_flags;
int tg_flags = 0;
+ int rc;
ioc->nvme_abort_timeout = 30;
- mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
- if (ioc->ir_firmware)
- mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
- &ioc->manu_pg10);
+ rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
+ &ioc->manu_pg0);
+ if (rc)
+ return rc;
+ if (ioc->ir_firmware) {
+ rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+ &ioc->manu_pg10);
+ if (rc)
+ return rc;
+ }
/*
* Ensure correct T10 PI operation if vendor left EEDPTagMode
* flag unset in NVDATA.
*/
- mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
+ rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
+ &ioc->manu_pg11);
+ if (rc)
+ return rc;
if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
ioc->name);
@@ -5174,12 +5237,24 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc_warn(ioc,
"TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
}
- mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
- mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
- mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
- mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
- mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
- mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
+ if (rc)
+ return rc;
_base_display_ioc_capabilities(ioc);
/*
@@ -5195,16 +5270,23 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
iounit_pg1_flags |=
MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
- mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ if (rc)
+ return rc;
if (ioc->iounit_pg8.NumSensors)
ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
- if (ioc->is_aero_ioc)
- _base_update_ioc_page1_inlinewith_perf_mode(ioc);
+ if (ioc->is_aero_ioc) {
+ rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
+ if (rc)
+ return rc;
+ }
if (ioc->is_gen35_ioc) {
- if (ioc->is_driver_loading)
- _base_get_diag_triggers(ioc);
- else {
+ if (ioc->is_driver_loading) {
+ rc = _base_get_diag_triggers(ioc);
+ if (rc)
+ return rc;
+ } else {
/*
* In case of online HBA FW update operation,
* check whether updated FW supports the driver trigger
@@ -5216,7 +5298,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
* and new FW doesn't support them then disable
* support_trigger_pages flag.
*/
- tg_flags = _base_check_for_trigger_pages_support(ioc);
+ _base_check_for_trigger_pages_support(ioc, &tg_flags);
if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
_base_update_diag_trigger_pages(ioc);
else if (ioc->supports_trigger_pages &&
@@ -5224,6 +5306,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
ioc->supports_trigger_pages = 0;
}
}
+ return 0;
}
/**
@@ -6233,7 +6316,7 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
* _base_dump_reg_set - This function will print hexdump of register set.
* @ioc: per adapter object
*
- * Returns nothing.
+ * Return: nothing.
*/
static inline void
_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
@@ -6467,7 +6550,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
*
* Return: Waits up to timeout seconds for the IOC to
* become operational. Returns 0 if IOC is present
- * and operational; otherwise returns -EFAULT.
+ * and operational; otherwise returns %-EFAULT.
*/
int
@@ -6480,6 +6563,17 @@ mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
break;
+
+ /*
+	 * The watchdog thread will be started after IOC initialization,
+	 * so there is no need to wait here for the IOC state to become
+	 * operational while IOC initialization is in progress. Instead the
+	 * driver returns -ETIME, so that the calling function can issue a
+	 * diag reset and retry the command.
+ */
+ if (ioc->is_driver_loading)
+ return -ETIME;
+
ssleep(1);
ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
__func__, ++wait_state_count);
@@ -7112,7 +7206,8 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
ioc->port_enable_failed = 1;
- if (ioc->is_driver_loading) {
+ if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
+ ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
mpt3sas_port_enable_complete(ioc);
return 1;
@@ -7213,8 +7308,9 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
-
+ ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->port_enable_cmds.smid = smid;
memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
@@ -7311,7 +7407,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
Mpi2EventNotificationRequest_t *mpi_request;
u16 smid;
int r = 0;
- int i;
+ int i, issue_diag_reset = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -7345,10 +7441,19 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
if (ioc->base_cmds.status & MPT3_CMD_RESET)
r = -EFAULT;
else
- r = -ETIME;
+ issue_diag_reset = 1;
+
} else
dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ if (issue_diag_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
+ return -EFAULT;
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
return r;
}
@@ -7712,7 +7817,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->is_driver_loading)
return r;
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_send_ioc_init(ioc)))
return r;
}
@@ -7746,12 +7851,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
return r;
}
- _base_static_config_pages(ioc);
+ rc = _base_static_config_pages(ioc);
+	if (rc)
+		return rc;
+
r = _base_event_notification(ioc);
if (r)
return r;
- if (ioc->is_driver_loading) {
+ if (!ioc->shost_recovery) {
if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
== 0x80) {
@@ -7851,7 +7959,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_ioc_facts(ioc)))
goto out_free_resources;
}
@@ -7868,7 +7976,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
/*
* In SAS3.0,
* SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
- * Target Status - all require the IEEE formated scatter gather
+ * Target Status - all require the IEEE formatted scatter gather
* elements.
*/
ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
@@ -7923,7 +8031,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
r = _base_get_port_facts(ioc, i);
if (r) {
- rc = _base_check_for_fault_and_issue_reset(ioc);
+ rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
if (rc || (_base_get_port_facts(ioc, i)))
goto out_free_resources;
}
@@ -8049,8 +8157,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
}
}
r = _base_make_ioc_operational(ioc);
- if (r)
- goto out_free_resources;
+ if (r == -EAGAIN) {
+ r = _base_make_ioc_operational(ioc);
+ if (r)
+ goto out_free_resources;
+ }
/*
* Copy current copy of IOCFacts in prev_fw_facts
@@ -8168,8 +8279,6 @@ _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
ioc->start_scan_failed =
MPI2_IOCSTATUS_INTERNAL_ERROR;
ioc->start_scan = 0;
- ioc->port_enable_cmds.status =
- MPT3_CMD_NOT_USED;
} else {
complete(&ioc->port_enable_cmds.done);
}
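
For reference, a minimal sketch (not part of the patch) of the timeout-handling pattern the new mpt3sas_base.c error paths share: when a request times out before the first port enable has been issued, the driver checks for a firmware fault, issues a diag reset, and reports -EAGAIN so the caller re-sends the request once; after the first port enable it reports -EFAULT instead. The flag and helper names are the ones added by this patch; the wrapper function itself is hypothetical.

static int example_handle_request_timeout(struct MPT3SAS_ADAPTER *ioc)
{
	/* After the first port enable, a transparent retry is no longer done. */
	if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
		return -EFAULT;
	/* Check for an IOC fault and, if one is found, issue a diag reset. */
	if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
		return -EFAULT;
	/* Ask the caller to retry the command once the IOC is back up. */
	return -EAGAIN;
}
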
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 98558d9c8c2d..d4834c8ee9c0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -500,6 +500,7 @@ struct MPT3SAS_DEVICE {
#define MPT3_CMD_PENDING 0x0002 /* pending */
#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
+#define MPT3_CMD_COMPLETE_ASYNC 0x0010 /* tells whether cmd completes in same thread or not */
/**
* struct _internal_cmd - internal commands struct
@@ -1175,6 +1176,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
+ * @drv_internal_flags: Bit map internal to driver
* @drv_support_bitmap: driver's supported feature bit map
* @use_32bit_dma: Flag to use 32 bit consistent dma mask
* @scsi_io_cb_idx: shost generated commands
@@ -1370,6 +1372,7 @@ struct MPT3SAS_ADAPTER {
bool msix_load_balance;
u16 thresh_hold;
u8 high_iops_queues;
+ u32 drv_internal_flags;
u32 drv_support_bitmap;
u32 dma_mask;
bool enable_sdev_max_qd;
@@ -1615,6 +1618,8 @@ struct mpt3sas_debugfs_buffer {
#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
#define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY 0x00000002
+#define MPT_DRV_INTERNAL_FIRST_PE_ISSUED 0x00000001
+
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
@@ -1709,6 +1714,9 @@ void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
u16 device_missing_delay, u8 io_missing_delay);
+int mpt3sas_base_check_for_fault_and_issue_reset(
+ struct MPT3SAS_ADAPTER *ioc);
+
int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
void
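
To illustrate how the two new bitmaps added here are meant to be used (a sketch distilled from the mpt3sas_base.c hunks above, not new driver code): MPT3_CMD_COMPLETE_ASYNC is a per-command status bit set by the submitter and consumed by the completion callback, while MPT_DRV_INTERNAL_FIRST_PE_ISSUED lives in the adapter-wide drv_internal_flags bitmap.

/* Submitter (mpt3sas_port_enable) marks the command for async completion. */
ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;

/* Completion callback (mpt3sas_port_enable_done) consumes the bit. */
if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
	ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
	/* finish the asynchronous port enable here */
}
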
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 55cd32908924..83a5c2172ad4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -359,8 +359,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
}
r = mpt3sas_wait_for_ioc(ioc, MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT);
- if (r)
+ if (r) {
+ if (r == -ETIME)
+ issue_host_reset = 1;
goto free_mem;
+ }
smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
if (!smid) {
@@ -395,7 +398,6 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
MPT3_CMD_RESET) || ioc->pci_error_recovery)
goto retry_config;
issue_host_reset = 1;
- r = -EFAULT;
goto free_mem;
}
@@ -486,8 +488,16 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
mutex_unlock(&ioc->config_cmds.mutex);
- if (issue_host_reset)
- mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ if (issue_host_reset) {
+ if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) {
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ r = -EFAULT;
+ } else {
+ if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
+ return -EFAULT;
+ r = -EAGAIN;
+ }
+ }
return r;
}
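
With the _config_request() change above, a config page read can now come back with -EAGAIN after a transparent diag reset instead of forcing a full host reset. A minimal sketch of the retry a load-path caller could do (the helper below is hypothetical; in this series the actual retry is done higher up, where mpt3sas_base_attach() re-invokes _base_make_ioc_operational() on -EAGAIN):

static int example_read_bios_pg2(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	int rc;

	rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	/* -EAGAIN means a diag reset was issued; retry the read once. */
	if (rc == -EAGAIN)
		rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply,
		    &ioc->bios_pg2);
	return rc;
}
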
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d00aca3c77ce..866d118f7931 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -78,6 +78,7 @@ static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
@@ -3631,8 +3632,6 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (ioc->is_driver_loading)
- return;
fw_event = alloc_fw_event_work(0);
if (!fw_event)
return;
@@ -3693,10 +3692,53 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
!ioc->firmware_event_thread)
return;
+ /*
+	 * Mark the currently running event as ignored, so that
+	 * it exits quickly. Since a diag reset has occurred,
+	 * there is no use in processing the remaining stale
+	 * event data entries.
+ */
+ if (ioc->shost_recovery && ioc->current_event)
+ ioc->current_event->ignore = 1;
ioc->fw_events_cleanup = 1;
while ((fw_event = dequeue_next_fw_event(ioc)) ||
(fw_event = ioc->current_event)) {
+
+ /*
+ * Don't call cancel_work_sync() for current_event
+ * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
+ * otherwise we may observe deadlock if current
+ * hard reset issued as part of processing the current_event.
+ *
+	 * The original logic of cleaning the current_event was added
+	 * to handle back to back host resets issued by the user,
+	 * i.e. during back to back host resets the driver used to process
+	 * the two instances of the MPT3SAS_REMOVE_UNRESPONDING_DEVICES
+	 * event back to back, and this made the driver unregister
+	 * the devices from SML.
+ */
+
+ if (fw_event == ioc->current_event &&
+ ioc->current_event->event !=
+ MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
+ ioc->current_event = NULL;
+ continue;
+ }
+
+ /*
+	 * The driver has to clear the ioc->start_scan flag when
+	 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
+	 * otherwise the scsi_scan_host() API waits for the
+	 * 5 minute timer to expire. If we exit from
+	 * scsi_scan_host() early then we can issue the
+	 * new port enable request as part of the current diag reset.
+ */
+ if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ ioc->start_scan = 0;
+ }
+
/*
* Wait on the fw_event to complete. If this returns 1, then
* the event was never executed, and we need a put for the
@@ -5077,10 +5119,8 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
ascq = 0x00;
break;
}
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
- ascq);
- scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
+ set_host_byte(scmd, DID_ABORT);
}
/**
@@ -5837,12 +5877,8 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scmd->sense_buffer[0] = 0x70;
- scmd->sense_buffer[2] = ILLEGAL_REQUEST;
- scmd->sense_buffer[12] = 0x20;
- scmd->sense_buffer[13] = 0;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
+ 0x20, 0);
}
break;
@@ -6884,8 +6920,10 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
handle, parent_handle,
(u64)sas_expander->sas_address, sas_expander->num_phys);
- if (!sas_expander->num_phys)
+ if (!sas_expander->num_phys) {
+ rc = -1;
goto out_fail;
+ }
sas_expander->phy = kcalloc(sas_expander->num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!sas_expander->phy) {
@@ -10118,6 +10156,17 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
* owner for the reference the list had on any object we prune.
*/
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+
+ /*
+	 * Clean up the sas_device_init_list, as the driver
+	 * goes for a fresh scan as part of the diag reset.
+ */
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_init_list, list) {
+ list_del_init(&sas_device->list);
+ sas_device_put(sas_device);
+ }
+
list_for_each_entry_safe(sas_device, sas_device_next,
&ioc->sas_device_list, list) {
if (!sas_device->responding)
@@ -10139,6 +10188,16 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
INIT_LIST_HEAD(&head);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ /*
+	 * Clean up the pcie_device_init_list, as the driver
+	 * goes for a fresh scan as part of the diag reset.
+ */
+ list_for_each_entry_safe(pcie_device, pcie_device_next,
+ &ioc->pcie_device_init_list, list) {
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+
list_for_each_entry_safe(pcie_device, pcie_device_next,
&ioc->pcie_device_list, list) {
if (!pcie_device->responding)
@@ -10541,8 +10600,7 @@ void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
- if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
- !ioc->sas_hba.num_phys)) {
+ if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
if (ioc->multipath_on_hba) {
_scsih_sas_port_refresh(ioc);
_scsih_update_vphys_after_reset(ioc);
@@ -10597,6 +10655,18 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
_scsih_del_dirty_vphy(ioc);
_scsih_del_dirty_port_entries(ioc);
_scsih_scan_for_devices_after_reset(ioc);
+ /*
+	 * If a diag reset has occurred during driver load,
+	 * the driver has to complete the driver load operation
+	 * by executing the following items:
+	 * - register the devices from sas_device_init_list with SML,
+	 * - clear the is_driver_loading flag,
+	 * - start the watchdog thread.
+	 * In the normal driver load path, the above are taken care of
+	 * when the driver executes scsih_scan_finished().
+ */
+ if (ioc->is_driver_loading)
+ _scsih_complete_devices_scanning(ioc);
_scsih_set_nvme_max_shutdown_latency(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
@@ -10742,11 +10812,23 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
_scsih_check_topo_delete_events(ioc,
(Mpi2EventDataSasTopologyChangeList_t *)
mpi_reply->EventData);
+ /*
+			 * No need to add the topology change list
+			 * event to the fw event work queue while a
+			 * diag reset is going on, since during diag
+			 * reset the driver scans the devices by reading
+			 * SAS device page0, not by processing the
+			 * events.
+ */
+ if (ioc->shost_recovery)
+ return 1;
break;
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_check_pcie_topo_remove_events(ioc,
(Mpi26EventDataPCIeTopologyChangeList_t *)
mpi_reply->EventData);
+ if (ioc->shost_recovery)
+ return 1;
break;
case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
_scsih_check_ir_config_unhide_events(ioc,
@@ -11262,13 +11344,27 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
if (channel == RAID_CHANNEL) {
raid_device = device;
+ /*
+ * If this boot vd is already registered with SML then
+ * no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (raid_device->starget)
+ return;
rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
raid_device->id, 0);
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
} else if (channel == PCIE_CHANNEL) {
- spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device = device;
+ /*
+ * If this boot NVMe device is already registered with SML then
+ * no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (pcie_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
tid = pcie_device->id;
list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
@@ -11276,8 +11372,15 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
if (rc)
_scsih_pcie_device_remove(ioc, pcie_device);
} else {
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = device;
+ /*
+ * If this boot sas/sata device is already registered with SML
+ * then no need to register it again as part of device scanning
+ * after diag reset during driver load operation.
+ */
+ if (sas_device->starget)
+ return;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
handle = sas_device->handle;
sas_address_parent = sas_device->sas_address_parent;
sas_address = sas_device->sas_address;
@@ -11576,6 +11679,25 @@ scsih_scan_start(struct Scsi_Host *shost)
}
/**
+ * _scsih_complete_devices_scanning - add the devices to sml and
+ * complete ioc initialization.
+ * @ioc: per adapter object
+ *
+ * Return: nothing.
+ */
+static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
+{
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+}
+
+/**
* scsih_scan_finished - scsi lld callback for .scan_finished
* @shost: SCSI host pointer
* @time: elapsed time of the scan in jiffies
@@ -11588,6 +11710,8 @@ static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 ioc_state;
+ int issue_hard_reset = 0;
if (disable_discovery > 0) {
ioc->is_driver_loading = 0;
@@ -11602,9 +11726,30 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
return 1;
}
- if (ioc->start_scan)
+ if (ioc->start_scan) {
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ issue_hard_reset = 1;
+ goto out;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_base_coredump_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+ issue_hard_reset = 1;
+ goto out;
+ }
return 0;
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
+ ioc_info(ioc,
+ "port enable: aborted due to diag reset\n");
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
if (ioc->start_scan_failed) {
ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
ioc->start_scan_failed);
@@ -11616,13 +11761,14 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
ioc_info(ioc, "port enable: SUCCESS\n");
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ _scsih_complete_devices_scanning(ioc);
- if (ioc->wait_for_discovery_to_complete) {
- ioc->wait_for_discovery_to_complete = 0;
- _scsih_probe_devices(ioc);
+out:
+ if (issue_hard_reset) {
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
+ ioc->is_driver_loading = 0;
}
- mpt3sas_base_start_watchdog(ioc);
- ioc->is_driver_loading = 0;
return 1;
}
@@ -11932,6 +12078,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->multipath_on_hba = 1;
else
ioc->multipath_on_hba = 0;
+ break;
default:
break;
}
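
The sense-buffer cleanups in this file (and in mvumi, myrb and myrs below) all follow the same shape; roughly, with the old form reconstructed from the removed lines for comparison:

/* Before: fill the sense buffer and set the result by hand. */
scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24, 0);
scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

/* After: scsi_build_sense() fills the sense buffer and sets scmd->result
 * itself, so the deprecated DRIVER_SENSE byte is no longer needed.
 */
scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
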
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 1acea528f27f..31d1ea5a5dd2 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1314,7 +1314,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
@@ -1764,7 +1764,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
case SAS_PROTOCOL_SSP:
/* hw says status == 0, datapres == 0 */
if (rx_desc & RXQ_GOOD) {
- tstat->stat = SAM_STAT_GOOD;
+ tstat->stat = SAS_SAM_STAT_GOOD;
tstat->resp = SAS_TASK_COMPLETE;
}
/* response frame present */
@@ -1773,12 +1773,12 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
sizeof(struct mvs_err_info);
sas_ssp_task_response(mvi->dev, task, iu);
} else
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
case SAS_PROTOCOL_SMP: {
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
- tstat->stat = SAM_STAT_GOOD;
+ tstat->stat = SAS_SAM_STAT_GOOD;
to = kmap_atomic(sg_page(sg_resp));
memcpy(to + sg_resp->offset,
slot->response + sizeof(struct mvs_err_info),
@@ -1795,7 +1795,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
}
default:
- tstat->stat = SAM_STAT_CHECK_CONDITION;
+ tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
}
if (!slot->port->port_attached) {
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 9d5743627604..6bb03d7a254d 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1317,11 +1317,10 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
sizeof(struct mvumi_sense_data));
- scmd->result |= (DRIVER_SENSE << 24);
}
break;
default:
- scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->result |= (DID_ABORT << 16);
break;
}
@@ -2068,10 +2067,7 @@ static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
return 0;
error:
- scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
- 0);
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
return -1;
}
@@ -2131,7 +2127,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
else
atomic_dec(&mhba->fw_outstanding);
- scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->result = (DID_ABORT << 16);
scmd->SCp.ptr = NULL;
if (scsi_bufflen(scmd)) {
dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
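
Similarly, the mvumi changes drop the deprecated driver byte from scmd->result; where only the host byte matters the explicit shift is kept, and where an existing status byte must be preserved the set_host_byte() accessor (used in mpt3sas_scsih above) does the same job:

/* Before: driver byte encoded in bits 24-31 of scmd->result. */
scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);

/* After: only the host byte is set. */
scmd->result = (DID_ABORT << 16);

/* Alternative when other result bytes must be kept intact. */
set_host_byte(scmd, DID_ABORT);
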
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index d9c82e211ae7..542ed88ef90d 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1397,8 +1397,7 @@ myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
static void myrb_request_sense(struct myrb_hba *cb,
struct scsi_cmnd *scmd)
{
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- NO_SENSE, 0, 0);
+ scsi_build_sense(scmd, 0, NO_SENSE, 0, 0);
scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
}
@@ -1447,10 +1446,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case INQUIRY:
if (scmd->cmnd[1] & 1) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrb_inquiry(cb, scmd);
scmd->result = (DID_OK << 16);
@@ -1465,10 +1461,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
(scmd->cmnd[2] & 0x3F) != 0x08) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrb_mode_sense(cb, scmd, ldev_info);
scmd->result = (DID_OK << 16);
@@ -1479,20 +1472,14 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
if ((scmd->cmnd[1] & 1) ||
(scmd->cmnd[8] & 1)) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
scmd->scsi_done(scmd);
return 0;
}
lba = get_unaligned_be32(&scmd->cmnd[2]);
if (lba) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
scmd->scsi_done(scmd);
return 0;
}
@@ -1506,10 +1493,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case SEND_DIAGNOSTIC:
if (scmd->cmnd[1] != 0x04) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
/* Assume good status */
scmd->result = (DID_OK << 16);
@@ -1519,10 +1503,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_6:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
scmd->scsi_done(scmd);
return 0;
}
@@ -1536,10 +1517,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_10:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
scmd->scsi_done(scmd);
return 0;
}
@@ -1553,10 +1531,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
case READ_12:
if (ldev_info->state == MYRB_DEVICE_WO) {
/* Data protect, attempt to read invalid data */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- DATA_PROTECT, 0x21, 0x06);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, DATA_PROTECT, 0x21, 0x06);
scmd->scsi_done(scmd);
return 0;
}
@@ -1569,9 +1544,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
break;
default:
/* Illegal request, invalid opcode */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x20, 0);
- scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0);
scmd->scsi_done(scmd);
return 0;
}
@@ -2352,25 +2325,19 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
"Bad Data Encountered\n");
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
/* Unrecovered read error */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x11, 0);
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0);
else
/* Write error */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x0C, 0);
- scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0);
break;
case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
/* Unrecovered read error, auto-reallocation failed */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x11, 0x04);
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x11, 0x04);
else
/* Write error, auto-reallocation failed */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- MEDIUM_ERROR, 0x0C, 0x02);
- scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, MEDIUM_ERROR, 0x0C, 0x02);
break;
case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
dev_dbg(&scmd->device->sdev_gendev,
@@ -2381,8 +2348,7 @@ static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
dev_dbg(&scmd->device->sdev_gendev,
"Attempt to Access Beyond End of Logical Drive");
/* Logical block address out of range */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- NOT_READY, 0x21, 0);
+ scsi_build_sense(scmd, 0, NOT_READY, 0x21, 0);
break;
case MYRB_STATUS_DEVICE_NONRESPONSIVE:
dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 3b68c68d1716..26326af23dbc 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1600,9 +1600,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
switch (scmd->cmnd[0]) {
case REPORT_LUNS:
- scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
- 0x20, 0x0);
- scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
scmd->scsi_done(scmd);
return 0;
case MODE_SENSE:
@@ -1612,10 +1610,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost,
if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
(scmd->cmnd[2] & 0x3F) != 0x08) {
/* Illegal request, invalid field in CDB */
- scsi_build_sense_buffer(0, scmd->sense_buffer,
- ILLEGAL_REQUEST, 0x24, 0);
- scmd->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
} else {
myrs_mode_sense(cs, scmd, ldev_info);
scmd->result = (DID_OK << 16);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 134bbd2d8b66..bc9d29e5fdba 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -176,38 +176,40 @@ static nsp32_sync_table nsp32_sync_table_pci[] = {
* function declaration
*/
/* module entry point */
-static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
-static void nsp32_remove(struct pci_dev *);
+static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
+static void nsp32_remove(struct pci_dev *);
static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
-static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
+static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
-static int nsp32_detect (struct pci_dev *pdev);
-static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static const char *nsp32_info (struct Scsi_Host *);
-static int nsp32_release (struct Scsi_Host *);
+static int nsp32_detect (struct pci_dev *pdev);
+static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
+static const char *nsp32_info (struct Scsi_Host *);
+static int nsp32_release (struct Scsi_Host *);
/* SCSI error handler */
-static int nsp32_eh_abort (struct scsi_cmnd *);
-static int nsp32_eh_host_reset(struct scsi_cmnd *);
+static int nsp32_eh_abort (struct scsi_cmnd *);
+static int nsp32_eh_host_reset(struct scsi_cmnd *);
/* generate SCSI message */
static void nsp32_build_identify(struct scsi_cmnd *);
static void nsp32_build_nop (struct scsi_cmnd *);
static void nsp32_build_reject (struct scsi_cmnd *);
-static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char);
+static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char,
+ unsigned char);
/* SCSI message handler */
static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short);
static void nsp32_msgout_occur (struct scsi_cmnd *);
-static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short);
+static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long,
+ unsigned short);
static int nsp32_setup_sg_table (struct scsi_cmnd *);
static int nsp32_selection_autopara(struct scsi_cmnd *);
static int nsp32_selection_autoscsi(struct scsi_cmnd *);
-static void nsp32_scsi_done (struct scsi_cmnd *);
+static void nsp32_scsi_done (struct scsi_cmnd *);
static int nsp32_arbitration (struct scsi_cmnd *, unsigned int);
static int nsp32_reselection (struct scsi_cmnd *, unsigned char);
static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int);
@@ -215,10 +217,13 @@ static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short);
/* SCSI SDTR */
static void nsp32_analyze_sdtr (struct scsi_cmnd *);
-static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char);
-static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
-static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *);
-static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char);
+static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *,
+ unsigned char);
+static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
+static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *,
+ unsigned char *, unsigned char *);
+static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *,
+ int, unsigned char);
/* SCSI bus status handler */
static void nsp32_wait_req (nsp32_hw_data *, int);
@@ -234,16 +239,16 @@ static irqreturn_t do_nsp32_isr(int, void *);
static int nsp32hw_init(nsp32_hw_data *);
/* EEPROM handler */
-static int nsp32_getprom_param (nsp32_hw_data *);
-static int nsp32_getprom_at24 (nsp32_hw_data *);
-static int nsp32_getprom_c16 (nsp32_hw_data *);
-static void nsp32_prom_start (nsp32_hw_data *);
-static void nsp32_prom_stop (nsp32_hw_data *);
-static int nsp32_prom_read (nsp32_hw_data *, int);
-static int nsp32_prom_read_bit (nsp32_hw_data *);
-static void nsp32_prom_write_bit(nsp32_hw_data *, int);
-static void nsp32_prom_set (nsp32_hw_data *, int, int);
-static int nsp32_prom_get (nsp32_hw_data *, int);
+static int nsp32_getprom_param (nsp32_hw_data *);
+static int nsp32_getprom_at24 (nsp32_hw_data *);
+static int nsp32_getprom_c16 (nsp32_hw_data *);
+static void nsp32_prom_start (nsp32_hw_data *);
+static void nsp32_prom_stop (nsp32_hw_data *);
+static int nsp32_prom_read (nsp32_hw_data *, int);
+static int nsp32_prom_read_bit (nsp32_hw_data *);
+static void nsp32_prom_write_bit(nsp32_hw_data *, int);
+static void nsp32_prom_set (nsp32_hw_data *, int, int);
+static int nsp32_prom_get (nsp32_hw_data *, int);
/* debug/warning/info message */
static void nsp32_message (const char *, int, char *, char *, ...);
@@ -356,8 +361,8 @@ static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
- int mode = FALSE;
+ int pos = data->msgout_len;
+ int mode = FALSE;
/* XXX: Auto DiscPriv detection is progressing... */
if (disc_priv == 0) {
@@ -377,13 +382,13 @@ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
unsigned char offset)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++;
data->msgoutbuf[pos] = EXTENDED_SDTR; pos++;
- data->msgoutbuf[pos] = period; pos++;
- data->msgoutbuf[pos] = offset; pos++;
+ data->msgoutbuf[pos] = period; pos++;
+ data->msgoutbuf[pos] = offset; pos++;
data->msgout_len = pos;
}
@@ -394,7 +399,7 @@ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
if (pos != 0) {
nsp32_msg(KERN_WARNING,
@@ -412,12 +417,12 @@ static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int pos = data->msgout_len;
+ int pos = data->msgout_len;
data->msgoutbuf[pos] = MESSAGE_REJECT; pos++;
data->msgout_len = pos;
}
-
+
/*
* timer
*/
@@ -450,7 +455,7 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
unsigned char phase;
int i, ret;
unsigned int msgout;
- u16_le s;
+ u16_le s;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
@@ -482,7 +487,7 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
- * MCNT 1: MSG#2
+ * MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
@@ -494,7 +499,8 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
msgout = 0;
}
- // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
+ // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n",
+ // nsp32_read2(base, SEL_TIME_OUT));
// nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
/*
@@ -520,10 +526,10 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
/* command control */
param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
- AUTOSCSI_START |
- AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 |
- AUTO_ATN );
+ AUTOSCSI_START |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 |
+ AUTO_ATN );
/* transfer control */
@@ -555,9 +561,9 @@ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
/*
* transfer parameter to ASIC
*/
- nsp32_write4(base, SGT_ADR, data->auto_paddr);
- nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
- AUTO_PARAMETER );
+ nsp32_write4(base, SGT_ADR, data->auto_paddr);
+ nsp32_write2(base, COMMAND_CONTROL,
+ CLEAR_CDB_FIFO_POINTER | AUTO_PARAMETER );
/*
* Check arbitration
@@ -599,7 +605,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
SCpnt->result = DID_BUS_BUSY << 16;
status = 1;
goto out;
- }
+ }
/*
* clear execph
@@ -616,13 +622,14 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
*/
for (i = 0; i < SCpnt->cmd_len; i++) {
nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
- }
+ }
nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);
/*
* set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
*/
- nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));
+ nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID,
+ BIT(host_id) | BIT(target));
/*
* set SCSI MSGOUT REG
@@ -642,7 +649,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
- * MCNT 1: MSG#2
+ * MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
@@ -662,7 +669,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
/*
* set SREQ hazard killer sampling rate
- *
+ *
* TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
* check other internal clock!
*/
@@ -687,7 +694,8 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
"syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
- nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
+ nsp32_read4(base, SGT_ADR),
+ nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
data->msgout_len, msgout);
@@ -716,10 +724,10 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* start AUTO SCSI, kick off arbitration
*/
command = (CLEAR_CDB_FIFO_POINTER |
- AUTOSCSI_START |
+ AUTOSCSI_START |
AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 |
- AUTO_ATN );
+ AUTO_MSGIN_02 |
+ AUTO_ATN);
nsp32_write2(base, COMMAND_CONTROL, command);
/*
@@ -739,9 +747,9 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
/*
* Arbitration Status Check
- *
+ *
* Note: Arbitration counter is waited during ARBIT_GO is not lifting.
- * Using udelay(1) consumes CPU time and system time, but
+ * Using udelay(1) consumes CPU time and system time, but
* arbitration delay time is defined minimal 2.4us in SCSI
* specification, thus udelay works as coarse grained wait timer.
*/
@@ -776,7 +784,7 @@ static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
SCpnt->result = DID_NO_CONNECT << 16;
status = FALSE;
- }
+ }
/*
* clear Arbit
@@ -822,7 +830,8 @@ static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
* or current nexus is not existed, unexpected
* reselection is occurred. Send reject message.
*/
- if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
+ if (newid >= ARRAY_SIZE(data->lunt) ||
+ newlun >= ARRAY_SIZE(data->lunt[0])) {
nsp32_msg(KERN_WARNING, "unknown id/lun");
return FALSE;
} else if(data->lunt[newid][newlun].SCpnt == NULL) {
@@ -876,7 +885,8 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
if (le32_to_cpu(sgt[i].len) > 0x10000) {
nsp32_msg(KERN_ERR,
- "can't transfer over 64KB at a time, size=0x%x", le32_to_cpu(sgt[i].len));
+ "can't transfer over 64KB at a time, "
+ "size=0x%x", le32_to_cpu(sgt[i].len));
return FALSE;
}
nsp32_dbg(NSP32_DEBUG_SGLIST,
@@ -894,7 +904,8 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
return TRUE;
}
-static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt,
+ void (*done)(struct scsi_cmnd *))
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
nsp32_target *target;
@@ -904,8 +915,9 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
"enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
- SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
- scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
+ SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0],
+ SCpnt->cmd_len, scsi_sg_count(SCpnt), scsi_sglist(SCpnt),
+ scsi_bufflen(SCpnt));
if (data->CurrentSC != NULL) {
nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
@@ -936,7 +948,6 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
SCpnt->scsi_done = done;
data->CurrentSC = SCpnt;
SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
- SCpnt->SCp.Message = 0;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
@@ -966,7 +977,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
/* Build IDENTIFY */
nsp32_build_identify(SCpnt);
- /*
+ /*
* If target is the first time to transfer after the reset
* (target don't have SDTR_DONE and SDTR_INITIATOR), sync
* message SDTR is needed to do synchronous transfer.
@@ -1051,9 +1062,9 @@ static int nsp32hw_init(nsp32_hw_data *data)
nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
}
- nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
- nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
+ nsp32_write2(base, TRANSFER_CONTROL, 0);
+ nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);
do {
@@ -1081,12 +1092,13 @@ static int nsp32hw_init(nsp32_hw_data *data)
nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));
nsp32_index_write1(base, CLOCK_DIV, data->clock);
- nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
+ nsp32_index_write1(base, BM_CYCLE,
+ MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disable */
/*
* initialize MISC_WRRD register
- *
+ *
* Note: Designated parameters is obeyed as following:
* MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
* MISC_MASTER_TERMINATION_SELECT: It must be set.
@@ -1101,10 +1113,10 @@ static int nsp32hw_init(nsp32_hw_data *data)
*/
nsp32_index_write2(base, MISC_WR,
(SCSI_DIRECTION_DETECTOR_SELECT |
- DELAYED_BMSTART |
- MASTER_TERMINATION_SELECT |
- BMREQ_NEGATE_TIMING_SEL |
- AUTOSEL_TIMING_SEL |
+ DELAYED_BMSTART |
+ MASTER_TERMINATION_SELECT |
+ BMREQ_NEGATE_TIMING_SEL |
+ AUTOSEL_TIMING_SEL |
BMSTOP_CHANGE2_NONDATA_PHASE));
nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
@@ -1125,15 +1137,16 @@ static int nsp32hw_init(nsp32_hw_data *data)
* enable to select designated IRQ (except for
* IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
*/
- nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ |
- IRQSELECT_SCSIRESET_IRQ |
- IRQSELECT_FIFO_SHLD_IRQ |
- IRQSELECT_RESELECT_IRQ |
- IRQSELECT_PHASE_CHANGE_IRQ |
- IRQSELECT_AUTO_SCSI_SEQ_IRQ |
- // IRQSELECT_BMCNTERR_IRQ |
- IRQSELECT_TARGET_ABORT_IRQ |
- IRQSELECT_MASTER_ABORT_IRQ );
+ nsp32_index_write2(base, IRQ_SELECT,
+ IRQSELECT_TIMER_IRQ |
+ IRQSELECT_SCSIRESET_IRQ |
+ IRQSELECT_FIFO_SHLD_IRQ |
+ IRQSELECT_RESELECT_IRQ |
+ IRQSELECT_PHASE_CHANGE_IRQ |
+ IRQSELECT_AUTO_SCSI_SEQ_IRQ |
+ // IRQSELECT_BMCNTERR_IRQ |
+ IRQSELECT_TARGET_ABORT_IRQ |
+ IRQSELECT_MASTER_ABORT_IRQ );
nsp32_write2(base, IRQ_CONTROL, 0);
/* PCI LED off */
@@ -1163,11 +1176,12 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* IRQ check, then enable IRQ mask
*/
irq_stat = nsp32_read2(base, IRQ_STATUS);
- nsp32_dbg(NSP32_DEBUG_INTR,
+ nsp32_dbg(NSP32_DEBUG_INTR,
"enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
/* is this interrupt comes from Ninja asic? */
if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
- nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
+ nsp32_dbg(NSP32_DEBUG_INTR,
+ "shared interrupt: irq other 0x%x", irq_stat);
goto out2;
}
handled = 1;
@@ -1207,7 +1221,8 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
if (SCpnt == NULL) {
nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
- nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x",
+ irq_stat, trans_stat);
goto out;
}
@@ -1265,13 +1280,13 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
"Data in/out phase processed");
/* read BMCNT, SGT pointer addr */
- nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
nsp32_read4(base, BM_CNT));
- nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
nsp32_read4(base, SGT_ADR));
- nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
nsp32_read4(base, SACK_CNT));
- nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
+ nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
nsp32_read4(base, SAVED_SACK_CNT));
scsi_set_resid(SCpnt, 0); /* all data transferred! */
@@ -1306,7 +1321,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* Read CSB and substitute CSB for SCpnt->result
* to save status phase stutas byte.
* scsi error handler checks host_byte (DID_*:
- * low level driver to indicate status), then checks
+ * low level driver to indicate status), then checks
* status_byte (SCSI status byte).
*/
SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN);
@@ -1314,7 +1329,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
if (auto_stat & ILLEGAL_PHASE) {
/* Illegal phase is detected. SACK is not back. */
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"AUTO SCSI ILLEGAL PHASE OCCUR!!!!");
/* TODO: currently we don't have any action... bus reset? */
@@ -1367,7 +1382,8 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
break;
default:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
- nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
+ nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x",
+ irq_stat, trans_stat);
show_busphase(busphase);
break;
}
@@ -1433,32 +1449,39 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
{
unsigned long flags;
nsp32_hw_data *data;
- int hostno;
+ int hostno;
unsigned int base;
unsigned char mode_reg;
- int id, speed;
- long model;
+ int id, speed;
+ long model;
hostno = host->host_no;
data = (nsp32_hw_data *)host->hostdata;
base = host->io_port;
seq_puts(m, "NinjaSCSI-32 status\n\n");
- seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
- seq_printf(m, "SCSI host No.: %d\n", hostno);
- seq_printf(m, "IRQ: %d\n", host->irq);
- seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
- seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
- seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize);
- seq_printf(m, "Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
+ seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n",
+ nsp32_release_version);
+ seq_printf(m, "SCSI host No.: %d\n", hostno);
+ seq_printf(m, "IRQ: %d\n", host->irq);
+ seq_printf(m, "IO: 0x%lx-0x%lx\n",
+ host->io_port, host->io_port + host->n_io_port - 1);
+ seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n",
+ host->base, host->base + data->MmioLength - 1);
+ seq_printf(m, "sg_tablesize: %d\n",
+ host->sg_tablesize);
+ seq_printf(m, "Chip revision: 0x%x\n",
+ (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);
mode_reg = nsp32_index_read1(base, CHIP_MODE);
model = data->pci_devid->driver_data;
#ifdef CONFIG_PM
- seq_printf(m, "Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
+ seq_printf(m, "Power Management: %s\n",
+ (mode_reg & OPTF) ? "yes" : "no");
#endif
- seq_printf(m, "OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
+ seq_printf(m, "OEM: %ld, %s\n",
+ (mode_reg & (OEM0|OEM1)), nsp32_model[model]);
spin_lock_irqsave(&(data->Lock), flags);
seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC);
@@ -1476,7 +1499,7 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
}
if (data->target[id].sync_flag == SDTR_DONE) {
- if (data->target[id].period == 0 &&
+ if (data->target[id].period == 0 &&
data->target[id].offset == ASYNC_OFFSET ) {
seq_puts(m, "async");
} else {
@@ -1518,7 +1541,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
* clear TRANSFERCONTROL_BM_START
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, BM_CNT, 0);
/*
* call scsi_done
@@ -1528,10 +1551,10 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
/*
* reset parameters
*/
- data->cur_lunt->SCpnt = NULL;
- data->cur_lunt = NULL;
- data->cur_target = NULL;
- data->CurrentSC = NULL;
+ data->cur_lunt->SCpnt = NULL;
+ data->cur_lunt = NULL;
+ data->cur_target = NULL;
+ data->CurrentSC = NULL;
}
@@ -1553,7 +1576,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
show_autophase(execph);
- nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, TRANSFER_CONTROL, 0);
/*
@@ -1561,7 +1584,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
*
* VALID:
* Save Data Pointer is received. Adjust pointer.
- *
+ *
* NO-VALID:
* SCSI-3 says if Save Data Pointer is not received, then we restart
* processing and we can't adjust any SCSI data pointer in next data
@@ -1574,7 +1597,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* Check sack_cnt/saved_sack_cnt, then adjust sg table if
* needed.
*/
- if (!(execph & MSGIN_00_VALID) &&
+ if (!(execph & MSGIN_00_VALID) &&
((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
unsigned int sacklen, s_sacklen;
@@ -1617,7 +1640,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* no processing.
*/
}
-
+
if (execph & MSGIN_03_VALID) {
/* MsgIn03 was valid to be processed. No need processing. */
}
@@ -1639,7 +1662,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
* negotiating.
*/
if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
- /*
+ /*
* If valid message is received, then
* negotiation is succeeded.
*/
@@ -1666,21 +1689,18 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
- SCpnt->SCp.Message = 0;
- nsp32_dbg(NSP32_DEBUG_BUSFREE,
+ nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
SCpnt->SCp.Status, scsi_get_resid(SCpnt));
- SCpnt->result = (DID_OK << 16) |
- (SCpnt->SCp.Message << 8) |
- (SCpnt->SCp.Status << 0);
+ SCpnt->result = (DID_OK << 16) |
+ (SCpnt->SCp.Status << 0);
nsp32_scsi_done(SCpnt);
/* All operation is done */
return TRUE;
} else if (execph & MSGIN_04_VALID) {
/* MsgIn 04: Disconnect */
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
- SCpnt->SCp.Message = 4;
-
+
nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
return TRUE;
} else {
@@ -1688,7 +1708,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
/* DID_ERROR? */
- //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
+ //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Status << 0);
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return TRUE;
@@ -1706,12 +1726,12 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- int old_entry = data->cur_entry;
- int new_entry;
- int sg_num = data->cur_lunt->sg_num;
- nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
- unsigned int restlen, sentlen;
- u32_le len, addr;
+ int old_entry = data->cur_entry;
+ int new_entry;
+ int sg_num = data->cur_lunt->sg_num;
+ nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
+ unsigned int restlen, sentlen;
+ u32_le len, addr;
nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
@@ -1719,7 +1739,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
/*
- * calculate new_entry from sack count and each sgt[].len
+ * calculate new_entry from sack count and each sgt[].len
* calculate the byte which is intent to send
*/
sentlen = 0;
@@ -1737,8 +1757,10 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
if (sentlen == s_sacklen) {
/* XXX: confirm it's ok or not */
- /* In this case, it's ok because we are at
- the head element of the sg. restlen is correctly calculated. */
+ /* In this case, it's ok because we are at
+ * the head element of the sg. restlen is correctly
+ * calculated.
+ */
}
/* calculate the rest length for transferring */
@@ -1753,7 +1775,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
/* set cur_entry with new_entry */
data->cur_entry = new_entry;
-
+
return;
last:
@@ -1781,7 +1803,7 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
int i;
-
+
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
"enter: msgout_len: 0x%x", data->msgout_len);
@@ -1815,10 +1837,10 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
//nsp32_restart_autoscsi(SCpnt, command);
nsp32_write2(base, COMMAND_CONTROL,
(CLEAR_CDB_FIFO_POINTER |
- AUTO_COMMAND_PHASE |
- AUTOSCSI_RESTART |
- AUTO_MSGIN_00_OR_04 |
- AUTO_MSGIN_02 ));
+ AUTO_COMMAND_PHASE |
+ AUTOSCSI_RESTART |
+ AUTO_MSGIN_00_OR_04 |
+ AUTO_MSGIN_02 ));
}
/*
* Write data with SACK, then wait sack is
@@ -1918,9 +1940,9 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
unsigned char msgtype;
unsigned char newlun;
unsigned short command = 0;
- int msgclear = TRUE;
- long new_sgtp;
- int ret;
+ int msgclear = TRUE;
+ long new_sgtp;
+ int ret;
/*
* read first message
@@ -1960,7 +1982,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
goto reject;
}
}
-
+
/*
* processing messages except for IDENTIFY
*
@@ -1976,10 +1998,10 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* These messages should not be occurred.
* They should be processed on AutoSCSI sequencer.
*/
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: 0x%x", msg);
break;
-
+
case RESTORE_POINTERS:
/*
* AutoMsgIn03 is disabled, and HBA gets this message.
@@ -2005,7 +2027,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
/*
* set new sg pointer
*/
- new_sgtp = data->cur_lunt->sglun_paddr +
+ new_sgtp = data->cur_lunt->sglun_paddr +
(data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
nsp32_write4(base, SGT_ADR, new_sgtp);
@@ -2016,13 +2038,13 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* These messages should not be occurred.
* They should be processed on AutoSCSI sequencer.
*/
- nsp32_msg (KERN_WARNING,
+ nsp32_msg (KERN_WARNING,
"unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");
-
+
break;
-
+
case MESSAGE_REJECT:
- /* If previous message_out is sending SDTR, and get
+ /* If previous message_out is sending SDTR, and get
message_reject from target, SDTR negotiation is failed */
if (data->cur_target->sync_flag &
(SDTR_INITIATOR | SDTR_TARGET)) {
@@ -2041,7 +2063,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
case LINKED_CMD_COMPLETE:
case LINKED_FLG_CMD_COMPLETE:
/* queue tag is not supported currently */
- nsp32_msg (KERN_WARNING,
+ nsp32_msg (KERN_WARNING,
"unsupported message: 0x%x", msgtype);
break;
@@ -2094,7 +2116,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
}
/*
- * Reach here means regular length of each type of
+ * Reach here means regular length of each type of
* extended messages.
*/
switch (data->msginbuf[2]) {
@@ -2129,12 +2151,12 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
goto reject; /* not implemented yet */
break;
-
+
default:
goto reject;
}
break;
-
+
default:
goto reject;
}
@@ -2150,7 +2172,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
* AutoSCSI restart, at the same time MsgOutOccur should be
* happened (however, such situation is really possible...?).
*/
- if (data->msgout_len > 0) {
+ if (data->msgout_len > 0) {
nsp32_write4(base, SCSI_MSG_OUT, 0);
command |= AUTO_ATN;
}
@@ -2192,7 +2214,7 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
return;
reject:
- nsp32_msg(KERN_WARNING,
+ nsp32_msg(KERN_WARNING,
"invalid or unsupported MessageIn, rejected. "
"current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
msg, data->msgin_len, msgtype);
@@ -2203,15 +2225,15 @@ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
}
/*
- *
+ *
*/
static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
- nsp32_target *target = data->cur_target;
- unsigned char get_period = data->msginbuf[3];
- unsigned char get_offset = data->msginbuf[4];
- int entry;
+ nsp32_target *target = data->cur_target;
+ unsigned char get_period = data->msginbuf[3];
+ unsigned char get_offset = data->msginbuf[4];
+ int entry;
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
@@ -2219,16 +2241,16 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
* If this inititor sent the SDTR message, then target responds SDTR,
* initiator SYNCREG, ACKWIDTH from SDTR parameter.
* Messages are not appropriate, then send back reject message.
- * If initiator did not send the SDTR, but target sends SDTR,
+ * If initiator did not send the SDTR, but target sends SDTR,
* initiator calculator the appropriate parameter and send back SDTR.
- */
+ */
if (target->sync_flag & SDTR_INITIATOR) {
/*
* Initiator sent SDTR, the target responds and
* send back negotiation SDTR.
*/
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");
-
+
target->sync_flag &= ~SDTR_INITIATOR;
target->sync_flag |= SDTR_DONE;
@@ -2242,7 +2264,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
*/
goto reject;
}
-
+
if (get_offset == ASYNC_OFFSET) {
/*
* Negotiation is succeeded, the target want
@@ -2273,7 +2295,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
if (entry < 0) {
/*
- * Target want to use long period which is not
+ * Target want to use long period which is not
* acceptable NinjaSCSI-32Bi/UDE.
*/
goto reject;
@@ -2286,7 +2308,7 @@ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
} else {
/* Target send SDTR to initiator. */
nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");
-
+
target->sync_flag |= SDTR_INITIATOR;
/* offset: */
@@ -2409,7 +2431,7 @@ static void nsp32_set_max_sync(nsp32_hw_data *data,
*/
static void nsp32_set_sync_entry(nsp32_hw_data *data,
nsp32_target *target,
- int entry,
+ int entry,
unsigned char offset)
{
unsigned char period, ackwidth, sample_rate;
@@ -2438,7 +2460,7 @@ static void nsp32_set_sync_entry(nsp32_hw_data *data,
static void nsp32_wait_req(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
- int wait_time = 0;
+ int wait_time = 0;
unsigned char bus, req_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
@@ -2450,7 +2472,7 @@ static void nsp32_wait_req(nsp32_hw_data *data, int state)
do {
bus = nsp32_read1(base, SCSI_BUS_MONITOR);
if ((bus & BUSMON_REQ) == req_bit) {
- nsp32_dbg(NSP32_DEBUG_WAIT,
+ nsp32_dbg(NSP32_DEBUG_WAIT,
"wait_time: %d", wait_time);
return;
}
@@ -2467,7 +2489,7 @@ static void nsp32_wait_req(nsp32_hw_data *data, int state)
static void nsp32_wait_sack(nsp32_hw_data *data, int state)
{
unsigned int base = data->BaseAddress;
- int wait_time = 0;
+ int wait_time = 0;
unsigned char bus, ack_bit;
if (!((state == ASSERT) || (state == NEGATE))) {
@@ -2532,8 +2554,8 @@ static int nsp32_detect(struct pci_dev *pdev)
struct Scsi_Host *host; /* registered host structure */
struct resource *res;
nsp32_hw_data *data;
- int ret;
- int i, j;
+ int ret;
+ int i, j;
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
@@ -2610,7 +2632,7 @@ static int nsp32_detect(struct pci_dev *pdev)
*/
/*
- * setup DMA
+ * setup DMA
*/
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
@@ -2710,16 +2732,16 @@ static int nsp32_detect(struct pci_dev *pdev)
goto free_sg_list;
}
- /*
+ /*
* PCI IO register
*/
res = request_region(host->io_port, host->n_io_port, "nsp32");
if (res == NULL) {
- nsp32_msg(KERN_ERR,
+ nsp32_msg(KERN_ERR,
"I/O region 0x%x+0x%x is already used",
data->BaseAddress, data->NumAddress);
goto free_irq;
- }
+ }
ret = scsi_add_host(host, &pdev->dev);
if (ret) {
@@ -2743,7 +2765,7 @@ static int nsp32_detect(struct pci_dev *pdev)
free_autoparam:
dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
-
+
scsi_unregister:
scsi_host_put(host);
@@ -2810,7 +2832,7 @@ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
}
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write2(base, BM_CNT, 0);
+ nsp32_write2(base, BM_CNT, 0);
SCpnt->result = DID_ABORT << 16;
nsp32_scsi_done(SCpnt);
@@ -2833,8 +2855,8 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
* clear counter
*/
nsp32_write2(base, TRANSFER_CONTROL, 0);
- nsp32_write4(base, BM_CNT, 0);
- nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
+ nsp32_write4(base, BM_CNT, 0);
+ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
/*
* fall back to asynchronous transfer mode
@@ -2856,7 +2878,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
for(i = 0; i < 5; i++) {
intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
- }
+ }
data->CurrentSC = NULL;
}
@@ -2867,7 +2889,7 @@ static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
unsigned int base = SCpnt->device->host->io_port;
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
- nsp32_msg(KERN_INFO, "Host Reset");
+ nsp32_msg(KERN_INFO, "Host Reset");
nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);
spin_lock_irq(SCpnt->device->host->host_lock);
@@ -2942,13 +2964,13 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
* AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
*
* ROMADDR
- * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
* 0x07 : HBA Synchronous Transfer Period
* Value 0: AutoSync, 1: Manual Setting
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Bus Termination
- * Value 0: Auto[ON], 1: ON, 2: OFF
+ * Value 0: Auto[ON], 1: ON, 2: OFF
* 0x11 : Not Used? (0)
* 0x12 : Bus Reset Delay Time (0x03)
* 0x13 : Bootable CD Support
@@ -2956,7 +2978,7 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
* 0x14 : Device Scan
* Bit 7 6 5 4 3 2 1 0
* | <----------------->
- * | SCSI ID: Value 0: Skip, 1: YES
+ * | SCSI ID: Value 0: Skip, 1: YES
* |-> Value 0: ALL scan, Value 1: Manual
* 0x15 - 0x1b : Not Used? (0)
* 0x1c : Constant? (0x01) (clock div?)
@@ -2967,10 +2989,10 @@ static int nsp32_getprom_param(nsp32_hw_data *data)
*/
static int nsp32_getprom_at24(nsp32_hw_data *data)
{
- int ret, i;
- int auto_sync;
+ int ret, i;
+ int auto_sync;
nsp32_target *target;
- int entry;
+ int entry;
/*
* Reset time which is designated by EEPROM.
@@ -3036,7 +3058,7 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
* C16 110 (I-O Data: SC-NBD) data map:
*
* ROMADDR
- * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
+ * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
* 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync)
* 0x08 - 0x0f : Not Used? (0x0)
@@ -3044,7 +3066,7 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
* Value 0: PIO, 1: Busmater
* 0x11 : Bus Reset Delay Time (0x00-0x20)
* 0x12 : Bus Termination
- * Value 0: Disable, 1: Enable
+ * Value 0: Disable, 1: Enable
* 0x13 - 0x19 : Disconnection
* Value 0: Disable, 1: Enable
* 0x1a - 0x7c : Not Used? (0)
@@ -3054,9 +3076,9 @@ static int nsp32_getprom_at24(nsp32_hw_data *data)
*/
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
- int ret, i;
+ int ret, i;
nsp32_target *target;
- int entry, val;
+ int entry, val;
/*
* Reset time which is designated by EEPROM.
@@ -3156,7 +3178,7 @@ static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
for (i = 7; i >= 0; i--) {
val += (nsp32_prom_read_bit(data) << i);
}
-
+
/* no ack */
nsp32_prom_write_bit(data, 1);
@@ -3281,7 +3303,8 @@ static int nsp32_resume(struct pci_dev *pdev)
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
unsigned short reg;
- nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host);
+ nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p",
+ pdev, pci_name(pdev), host);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake (pdev, PCI_D0, 0);
@@ -3316,13 +3339,13 @@ static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
- ret = pci_enable_device(pdev);
+ ret = pci_enable_device(pdev);
if (ret) {
nsp32_msg(KERN_ERR, "failed to enable pci device");
return ret;
}
- data->Pci = pdev;
+ data->Pci = pdev;
data->pci_devid = id;
data->IrqNumber = pdev->irq;
data->BaseAddress = pci_resource_start(pdev, 0);
@@ -3351,7 +3374,7 @@ static void nsp32_remove(struct pci_dev *pdev)
nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
- scsi_remove_host(host);
+ scsi_remove_host(host);
nsp32_release(host);
@@ -3364,8 +3387,8 @@ static struct pci_driver nsp32_driver = {
.probe = nsp32_probe,
.remove = nsp32_remove,
#ifdef CONFIG_PM
- .suspend = nsp32_suspend,
- .resume = nsp32_resume,
+ .suspend = nsp32_suspend,
+ .resume = nsp32_resume,
#endif
};
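
Note: the nsp32 hunks above drop SCp.Message and stop folding a message byte into the completion code, leaving only the host byte and the SCSI status byte. A minimal sketch of the resulting composition (the byte positions follow the long-standing scsi_cmnd result layout shown in the hunks; the helper name here is illustrative, not from the driver):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Sketch only: complete a command with host byte DID_OK and a SAM
 * status. Bits 16-23 carry the host byte, bits 0-7 the SCSI status
 * byte; the old message-byte position (bits 8-15) is left at zero.
 */
static void nsp32_complete_ok(struct scsi_cmnd *SCpnt, u8 sam_status)
{
	SCpnt->result = (DID_OK << 16) | sam_status;
	SCpnt->scsi_done(SCpnt);
}
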
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index ac89002646a3..7c0f931e55e8 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -221,7 +221,7 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
SCpnt->SCp.Message = 0;
SCpnt->SCp.have_data_in = IO_UNKNOWN;
SCpnt->SCp.sent_command = 0;
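
Note: the nsp_cs hunk swaps the legacy CHECK_CONDITION define for the SAM status code from scsi_proto.h. A one-line sketch (assumption: the legacy define was the old right-shifted "status byte" form this series is phasing out):

#include <scsi/scsi_proto.h>

/* Store the raw SAM status code (CHECK CONDITION is 0x02) rather
 * than the obsolete shifted define. */
SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
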
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 71aa6af08340..33f8217577b1 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1930,7 +1930,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
} else {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
@@ -2390,7 +2390,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
@@ -2912,7 +2912,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
break;
@@ -2939,17 +2939,17 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_ERROR_HW_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
@@ -3710,7 +3710,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, EH, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
break;
case IO_NOT_VALID:
pm8001_dbg(pm8001_ha, EH, "IO_NOT_VALID\n");
@@ -4357,7 +4357,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
spin_lock_irqsave(&task->task_state_lock, flags);
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 335cf37e6cb9..6f33d821e545 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -118,10 +118,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
align_offset = (dma_addr_t)align - 1;
mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
&mem_dma_handle, GFP_KERNEL);
- if (!mem_virt_alloc) {
- pr_err("pm80xx: memory allocation error\n");
- return -1;
- }
+ if (!mem_virt_alloc)
+ return -ENOMEM;
*pphys_addr = mem_dma_handle;
phys_align = (*pphys_addr + align_offset) & ~align_offset;
*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
@@ -758,7 +756,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
@@ -843,7 +841,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = TMF_RESP_FUNC_COMPLETE;
break;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 700530e969ac..45ecd9639977 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -1952,7 +1952,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
param);
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
} else {
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_PROTO_RESPONSE;
@@ -2487,7 +2487,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
if (param == 0) {
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
@@ -3042,7 +3042,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_SUCCESS:
pm8001_dbg(pm8001_ha, IO, "IO_SUCCESS\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
@@ -3084,17 +3084,17 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
case IO_ERROR_HW_TIMEOUT:
pm8001_dbg(pm8001_ha, IO, "IO_ERROR_HW_TIMEOUT\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_XFER_ERROR_PHY_NOT_READY:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_PHY_NOT_READY\n");
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_BUSY;
+ ts->stat = SAS_SAM_STAT_BUSY;
break;
case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
pm8001_dbg(pm8001_ha, IO,
@@ -4699,7 +4699,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
spin_lock_irqsave(&task->task_state_lock, flags);
ts->resp = SAS_TASK_COMPLETE;
- ts->stat = SAM_STAT_GOOD;
+ ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
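
Note: the pm8001/pm80xx hunks convert ts->stat from the generic SAM_STAT_* codes to SAS_SAM_STAT_* when completing libsas tasks (the SAS-prefixed names are assumed here to be libsas-side equivalents of the same SAM values). A condensed sketch of the completion pattern, with names taken from the hunks:

/* Sketch only: mark a libsas task complete with a good SAM status. */
struct task_status_struct *ts = &task->task_status;

ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags |= SAS_TASK_STATE_DONE;
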
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 6d36debde18e..bbb75318f1e7 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -47,8 +47,8 @@
/*
* MAX_CMD : maximum commands that can be outstanding with IOA
* MAX_IO_CMD : command blocks available for IO commands
- * MAX_HCAM_CMD : command blocks avaibale for HCAMS
- * MAX_INTERNAL_CMD : command blocks avaible for internal commands like reset
+ * MAX_HCAM_CMD : command blocks available for HCAMS
+ * MAX_INTERNAL_CMD : command blocks available for internal commands like reset
*/
#define PMCRAID_MAX_CMD 1024
#define PMCRAID_MAX_IO_CMD 1020
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index aa41f7ac91cb..977315fdc254 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -1148,18 +1148,6 @@ static struct parport_driver ppa_driver = {
.detach = ppa_detach,
.devmodel = true,
};
+module_parport_driver(ppa_driver);
-static int __init ppa_driver_init(void)
-{
- printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
- return parport_register_driver(&ppa_driver);
-}
-
-static void __exit ppa_driver_exit(void)
-{
- parport_unregister_driver(&ppa_driver);
-}
-
-module_init(ppa_driver_init);
-module_exit(ppa_driver_exit);
MODULE_LICENSE("GPL");
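
Note: module_parport_driver() in the ppa hunk replaces the hand-written init/exit pair; the version banner printk from the old init is dropped along the way. A sketch of what the helper is assumed to expand to, following the usual module_driver() register/unregister pattern:

/* Rough equivalent of module_parport_driver(ppa_driver) -- sketch only. */
static int __init ppa_driver_init(void)
{
	return parport_register_driver(&ppa_driver);
}
module_init(ppa_driver_init);

static void __exit ppa_driver_exit(void)
{
	parport_unregister_driver(&ppa_driver);
}
module_exit(ppa_driver_exit);
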
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index ccb5771f1cb7..0f4b99d92f12 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -234,10 +234,8 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd,
}
if (res) {
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0);
cmd->result = res;
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = ILLEGAL_REQUEST;
priv->curr_cmd = NULL;
cmd->scsi_done(cmd);
}
@@ -319,8 +317,7 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data)
goto done;
}
- scsi_build_sense_buffer(0, cmd->sense_buffer, sense_key, asc, ascq);
- cmd->result = SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense(cmd, 0, sense_key, asc, ascq);
done:
priv->curr_cmd = NULL;
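
Note: both ps3rom hunks fold the open-coded sense-buffer setup into scsi_build_sense(), which, as the removed lines show, takes over zeroing/filling the sense data and setting the CHECK CONDITION result. Sketch of the call as used above (the leading 0 presumably selects fixed-format sense; the trailing asc/ascq zeros are placeholders, exactly as in the patch):

/* Report ILLEGAL REQUEST on a rejected command -- sketch only. */
scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0, 0);
cmd->scsi_done(cmd);
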
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
index e0387e495261..0d2aed82882a 100644
--- a/drivers/scsi/qedf/qedf_dbg.c
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -106,11 +106,10 @@ ret:
int
qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len)
{
- *buf = vmalloc(len);
+ *buf = vzalloc(len);
if (!(*buf))
return -ENOMEM;
- memset(*buf, 0, len);
return 0;
}
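
Note: the qedf_dbg.c hunk is the standard vmalloc()+memset() to vzalloc() conversion: one call that returns already-zeroed memory. Minimal sketch of the before/after shapes (helper names are illustrative):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Before: allocate, then zero by hand. */
static int alloc_buf_old(u8 **buf, u32 len)
{
	*buf = vmalloc(len);
	if (!*buf)
		return -ENOMEM;
	memset(*buf, 0, len);
	return 0;
}

/* After: vzalloc() hands back zeroed memory in one step. */
static int alloc_buf_new(u8 **buf, u32 len)
{
	*buf = vzalloc(len);
	if (!*buf)
		return -ENOMEM;
	return 0;
}
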
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 4869ef813dc4..6184bc485811 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -23,11 +23,6 @@ static void qedf_cmd_timeout(struct work_struct *work)
struct qedf_ctx *qedf;
struct qedf_rport *fcport;
- if (io_req == NULL) {
- QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
- return;
- }
-
fcport = io_req->fcport;
if (io_req->fcport == NULL) {
QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b92570a7c309..85f41abcb56c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1717,6 +1717,9 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
FW_ENGINEERING_VERSION);
+ snprintf(fc_host_vendor_identifier(lport->host),
+ FC_VENDOR_IDENTIFIER, "%s", "Marvell");
+
}
static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1877,6 +1880,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vn_port->host->max_lun = qedf_max_lun;
vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ vn_port->host->max_id = QEDF_MAX_SESSIONS;
rc = scsi_add_host(vn_port->host, &vport->dev);
if (rc) {
@@ -3528,6 +3532,7 @@ retry_probe:
host->transportt = qedf_fc_transport_template;
host->max_lun = qedf_max_lun;
host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ host->max_id = QEDF_MAX_SESSIONS;
host->can_queue = FCOE_PARAMS_NUM_TASKS;
rc = scsi_add_host(host, &pdev->dev);
if (rc) {
@@ -3971,10 +3976,6 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
- if (!qedf) {
- QEDF_ERR(NULL, "qedf is NULL");
- return;
- }
QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
qedf_ctx_soft_reset(qedf->lport);
}
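
Note: the qedf_io.c and qedf_main.c hunks delete NULL checks on pointers obtained via container_of(). The work pointer handed to a workqueue callback is always valid, and container_of() is only pointer arithmetic on it, so the result cannot be NULL and the guards were dead code. Sketch (names from the qedf_stag_change_work hunk):

static void qedf_stag_change_work(struct work_struct *work)
{
	/* container_of() on a valid 'work' pointer cannot yield NULL,
	 * so no check is needed before dereferencing qedf. */
	struct qedf_ctx *qedf =
		container_of(work, struct qedf_ctx, stag_work.work);

	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
	qedf_ctx_soft_reset(qedf->lport);
}
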
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index c342defc3f52..ce199a7a16b8 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -284,6 +284,7 @@ struct qedi_ctx {
#define QEDI_IN_RECOVERY 5
#define QEDI_IN_OFFLINE 6
#define QEDI_IN_SHUTDOWN 7
+#define QEDI_BLOCK_IO 8
u8 mac[ETH_ALEN];
u32 src_ip[4];
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 440ddd2309f1..71333d3c5c86 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,8 +14,8 @@
#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"
-static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask);
+static int send_iscsi_tmf(struct qedi_conn *qedi_conn,
+ struct iscsi_task *mtask, struct iscsi_task *ctask);
void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
{
@@ -73,7 +73,6 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
spin_unlock(&session->back_lock);
@@ -138,7 +137,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
qedi_conn->gen_pdu.resp_buf,
@@ -158,19 +156,11 @@ static void qedi_tmf_resp_work(struct work_struct *work)
struct iscsi_tm_rsp *resp_hdr_ptr;
int rval = 0;
- set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
- iscsi_block_session(session->cls_session);
rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
- if (rval) {
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
- iscsi_unblock_session(session->cls_session);
+ if (rval)
goto exit_tmf_resp;
- }
-
- iscsi_unblock_session(session->cls_session);
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
spin_lock(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
@@ -178,7 +168,10 @@ static void qedi_tmf_resp_work(struct work_struct *work)
exit_tmf_resp:
kfree(resp_hdr_ptr);
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works--;
+ spin_unlock(&qedi_conn->tmf_work_lock);
}
static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
@@ -234,18 +227,25 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
}
spin_unlock(&qedi_conn->list_lock);
- if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
+ spin_lock(&qedi_conn->tmf_work_lock);
+ switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ if (qedi_conn->ep_disconnect_starting) {
+ /* Session is down so ep_disconnect will clean up */
+ spin_unlock(&qedi_conn->tmf_work_lock);
+ goto unblock_sess;
+ }
+
+ qedi_conn->fw_cleanup_works++;
+ spin_unlock(&qedi_conn->tmf_work_lock);
+
INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
goto unblock_sess;
}
-
- qedi_clear_task_idx(qedi, qedi_cmd->task_id);
+ spin_unlock(&qedi_conn->tmf_work_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
kfree(resp_hdr_ptr);
@@ -314,7 +314,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
}
static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
@@ -468,7 +467,6 @@ static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
}
spin_unlock(&qedi_conn->list_lock);
- qedi_clear_task_idx(qedi, cmd->task_id);
}
done:
@@ -673,7 +671,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
if (qedi_io_tracing)
qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);
- qedi_clear_task_idx(qedi, cmd->task_id);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
conn->data, datalen);
error:
@@ -730,7 +727,6 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
cqe->itid, cmd->task_id);
cmd->state = RESPONSE_RECEIVED;
- qedi_clear_task_idx(qedi, cmd->task_id);
spin_lock_bh(&session->back_lock);
__iscsi_put_task(task);
@@ -739,20 +735,17 @@ static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
struct iscsi_cqe_solicited *cqe,
- struct iscsi_task *task,
struct iscsi_conn *conn)
{
struct qedi_work_map *work, *work_tmp;
u32 proto_itt = cqe->itid;
- u32 ptmp_itt = 0;
itt_t protoitt = 0;
int found = 0;
struct qedi_cmd *qedi_cmd = NULL;
- u32 rtid = 0;
u32 iscsi_cid;
struct qedi_conn *qedi_conn;
struct qedi_cmd *dbg_cmd;
- struct iscsi_task *mtask;
+ struct iscsi_task *mtask, *task;
struct iscsi_tm *tmf_hdr = NULL;
iscsi_cid = cqe->conn_id;
@@ -778,93 +771,64 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
}
found = 1;
mtask = qedi_cmd->task;
+ task = work->ctask;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- rtid = work->rtid;
list_del_init(&work->list);
kfree(work);
qedi_cmd->list_tmf_work = NULL;
}
}
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
-
- if (found) {
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
- proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
-
- if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_ABORT_TASK) {
- spin_lock_bh(&conn->session->back_lock);
-
- protoitt = build_itt(get_itt(tmf_hdr->rtt),
- conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
-
- spin_unlock_bh(&conn->session->back_lock);
-
- if (!task) {
- QEDI_NOTICE(&qedi->dbg_ctx,
- "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
- get_itt(tmf_hdr->rtt),
- qedi_conn->iscsi_conn_id);
- return;
- }
-
- dbg_cmd = task->dd_data;
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
- get_itt(tmf_hdr->rtt), get_itt(task->itt),
- dbg_cmd->task_id, qedi_conn->iscsi_conn_id);
+ if (!found) {
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ goto check_cleanup_reqs;
+ }
- if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
- qedi_cmd->state = CLEANUP_RECV;
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
+ proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);
+
+ spin_lock_bh(&conn->session->back_lock);
+ if (iscsi_task_is_completed(task)) {
+ QEDI_NOTICE(&qedi->dbg_ctx,
+ "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), qedi_conn->iscsi_conn_id);
+ goto unlock;
+ }
- qedi_clear_task_idx(qedi_conn->qedi, rtid);
+ dbg_cmd = task->dd_data;
- spin_lock(&qedi_conn->list_lock);
- if (likely(dbg_cmd->io_cmd_in_list)) {
- dbg_cmd->io_cmd_in_list = false;
- list_del_init(&dbg_cmd->io_cmd);
- qedi_conn->active_cmd_count--;
- }
- spin_unlock(&qedi_conn->list_lock);
- qedi_cmd->state = CLEANUP_RECV;
- wake_up_interruptible(&qedi_conn->wait_queue);
- }
- } else if (qedi_conn->cmd_cleanup_req > 0) {
- spin_lock_bh(&conn->session->back_lock);
- qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
- protoitt = build_itt(ptmp_itt, conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
- "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
- cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
- qedi_conn->iscsi_conn_id);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
+ "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
+ get_itt(tmf_hdr->rtt), get_itt(task->itt), dbg_cmd->task_id,
+ qedi_conn->iscsi_conn_id);
- spin_unlock_bh(&conn->session->back_lock);
- if (!task) {
- QEDI_NOTICE(&qedi->dbg_ctx,
- "task is null, itid=0x%x, cid=0x%x\n",
- cqe->itid, qedi_conn->iscsi_conn_id);
- return;
- }
- qedi_conn->cmd_cleanup_cmpl++;
- wake_up(&qedi_conn->wait_queue);
+ spin_lock(&qedi_conn->list_lock);
+ if (likely(dbg_cmd->io_cmd_in_list)) {
+ dbg_cmd->io_cmd_in_list = false;
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
+ spin_unlock(&qedi_conn->list_lock);
+ qedi_cmd->state = CLEANUP_RECV;
+unlock:
+ spin_unlock_bh(&conn->session->back_lock);
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ wake_up_interruptible(&qedi_conn->wait_queue);
+ return;
+check_cleanup_reqs:
+ if (qedi_conn->cmd_cleanup_req > 0) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
cqe->itid, qedi_conn->iscsi_conn_id);
- qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
-
+ qedi_conn->cmd_cleanup_cmpl++;
+ wake_up(&qedi_conn->wait_queue);
} else {
- qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
- protoitt = build_itt(ptmp_itt, conn->session->age);
- task = iscsi_itt_to_task(conn, protoitt);
QEDI_ERR(&qedi->dbg_ctx,
- "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
- protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
+ "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
+ protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
}
}
@@ -959,8 +923,7 @@ void qedi_fp_process_cqes(struct qedi_work *work)
goto exit_fp_process;
case ISCSI_CQE_TYPE_TASK_CLEANUP:
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
- qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
- conn);
+ qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, conn);
goto exit_fp_process;
default:
QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
@@ -1368,7 +1331,7 @@ static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
return 0;
}
-static void qedi_tmf_work(struct work_struct *work)
+static void qedi_abort_work(struct work_struct *work)
{
struct qedi_cmd *qedi_cmd =
container_of(work, struct qedi_cmd, tmf_work);
@@ -1381,17 +1344,29 @@ static void qedi_tmf_work(struct work_struct *work)
struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
s16 rval = 0;
- s16 tid = 0;
mtask = qedi_cmd->task;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
- ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
- if (!ctask || !ctask->sc) {
- QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
- goto abort_ret;
+ spin_lock_bh(&conn->session->back_lock);
+ ctask = iscsi_itt_to_ctask(conn, tmf_hdr->rtt);
+ if (!ctask) {
+ spin_unlock_bh(&conn->session->back_lock);
+ QEDI_ERR(&qedi->dbg_ctx, "Invalid RTT. Letting abort timeout.\n");
+ goto clear_cleanup;
+ }
+
+ if (iscsi_task_is_completed(ctask)) {
+ spin_unlock_bh(&conn->session->back_lock);
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Task already completed\n");
+ /*
+ * We have to still send the TMF because libiscsi needs the
+ * response to avoid a timeout.
+ */
+ goto send_tmf;
}
+ spin_unlock_bh(&conn->session->back_lock);
cmd = (struct qedi_cmd *)ctask->dd_data;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
@@ -1402,19 +1377,21 @@ static void qedi_tmf_work(struct work_struct *work)
if (qedi_do_not_recover) {
QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
qedi_do_not_recover);
- goto abort_ret;
+ goto clear_cleanup;
}
- list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
+ list_work = kzalloc(sizeof(*list_work), GFP_NOIO);
if (!list_work) {
QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
- goto abort_ret;
+ goto clear_cleanup;
}
qedi_cmd->type = TYPEIO;
+ qedi_cmd->state = CLEANUP_WAIT;
list_work->qedi_cmd = qedi_cmd;
list_work->rtid = cmd->task_id;
list_work->state = QEDI_WORK_SCHEDULED;
+ list_work->ctask = ctask;
qedi_cmd->list_tmf_work = list_work;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
@@ -1437,23 +1414,13 @@ static void qedi_tmf_work(struct work_struct *work)
goto ldel_exit;
}
- tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
- qedi_conn->iscsi_conn_id);
- goto ldel_exit;
- }
-
- qedi_cmd->task_id = tid;
- qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
-
-abort_ret:
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
- return;
+send_tmf:
+ send_iscsi_tmf(qedi_conn, qedi_cmd->task, ctask);
+ goto clear_cleanup;
ldel_exit:
spin_lock_bh(&qedi_conn->tmf_work_lock);
- if (!qedi_cmd->list_tmf_work) {
+ if (qedi_cmd->list_tmf_work) {
list_del_init(&list_work->list);
qedi_cmd->list_tmf_work = NULL;
kfree(list_work);
@@ -1468,18 +1435,19 @@ ldel_exit:
}
spin_unlock(&qedi_conn->list_lock);
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+clear_cleanup:
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works--;
+ spin_unlock(&qedi_conn->tmf_work_lock);
}
-static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask)
+static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
+ struct iscsi_task *ctask)
{
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct e4_iscsi_task_context *fw_task_ctx;
- struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
- struct iscsi_task *ctask;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd;
struct qedi_cmd *cmd;
@@ -1487,7 +1455,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
u32 scsi_lun[2];
s16 tid = 0;
u16 sq_idx = 0;
- int rval = 0;
tmf_hdr = (struct iscsi_tm *)mtask->hdr;
qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
@@ -1520,12 +1487,6 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_ABORT_TASK) {
- ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
- if (!ctask || !ctask->sc) {
- QEDI_ERR(&qedi->dbg_ctx,
- "Could not get reference task\n");
- return 0;
- }
cmd = (struct qedi_cmd *)ctask->dd_data;
tmf_pdu_header.rtt =
qedi_set_itt(cmd->task_id,
@@ -1551,10 +1512,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
task_params.sqe = &ep->sq[sq_idx];
memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
- rval = init_initiator_tmf_request_task(&task_params,
- &tmf_pdu_header);
- if (rval)
- return -1;
+ init_initiator_tmf_request_task(&task_params, &tmf_pdu_header);
spin_lock(&qedi_conn->list_lock);
list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1566,47 +1524,34 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
return 0;
}
-int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask)
+int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask)
{
+ struct iscsi_tm *tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+ struct qedi_cmd *qedi_cmd = mtask->dd_data;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct iscsi_tm *tmf_hdr;
- struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
- s16 tid = 0;
+ int rc = 0;
- tmf_hdr = (struct iscsi_tm *)mtask->hdr;
- qedi_cmd->task = mtask;
+ switch (tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works++;
+ spin_unlock(&qedi_conn->tmf_work_lock);
- /* If abort task then schedule the work and return */
- if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_ABORT_TASK) {
- qedi_cmd->state = CLEANUP_WAIT;
- INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
+ INIT_WORK(&qedi_cmd->tmf_work, qedi_abort_work);
queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
-
- } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
- ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
- ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
- tid = qedi_get_task_idx(qedi);
- if (tid == -1) {
- QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
- qedi_conn->iscsi_conn_id);
- return -1;
- }
- qedi_cmd->task_id = tid;
-
- qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);
-
- } else {
+ break;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ rc = send_iscsi_tmf(qedi_conn, mtask, NULL);
+ break;
+ default:
QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
qedi_conn->iscsi_conn_id);
- return -1;
+ return -EINVAL;
}
- return 0;
+ return rc;
}
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 116645c08c71..9f8e8ef405a1 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -31,8 +31,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
-int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
- struct iscsi_task *mtask);
+int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask);
int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct iscsi_task *task);
int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
@@ -73,6 +72,5 @@ void qedi_remove_sysfs_ctx_attr(struct qedi_ctx *qedi);
void qedi_clearsq(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn,
struct iscsi_task *task);
-void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess);
#endif
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 08c05403cd72..97f83760da88 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -330,12 +330,22 @@ free_conn:
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session)
{
- iscsi_block_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+ struct qedi_conn *qedi_conn = session->leadconn->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ set_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
+ spin_unlock_bh(&session->frwd_lock);
}
void qedi_mark_device_available(struct iscsi_cls_session *cls_session)
{
- iscsi_unblock_session(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+ struct qedi_conn *qedi_conn = session->leadconn->dd_data;
+
+ spin_lock_bh(&session->frwd_lock);
+ clear_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags);
+ spin_unlock_bh(&session->frwd_lock);
}
static int qedi_bind_conn_to_iscsi_cid(struct qedi_ctx *qedi,
@@ -377,6 +387,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
struct qedi_ctx *qedi = iscsi_host_priv(shost);
struct qedi_endpoint *qedi_ep;
struct iscsi_endpoint *ep;
+ int rc = 0;
ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)
@@ -384,11 +395,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_ep = ep->dd_data;
if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) ||
- (qedi_ep->state == EP_STATE_TCP_RST_RCVD))
- return -EINVAL;
+ (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
+
+ if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
- if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
- return -EINVAL;
qedi_ep->conn = qedi_conn;
qedi_conn->ep = qedi_ep;
@@ -398,13 +414,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_conn->cmd_cleanup_req = 0;
qedi_conn->cmd_cleanup_cmpl = 0;
- if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn))
- return -EINVAL;
+ if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
+ rc = -EINVAL;
+ goto put_ep;
+ }
+
spin_lock_init(&qedi_conn->tmf_work_lock);
INIT_LIST_HEAD(&qedi_conn->tmf_work_list);
init_waitqueue_head(&qedi_conn->wait_queue);
- return 0;
+put_ep:
+ iscsi_put_endpoint(ep);
+ return rc;
}
static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
@@ -582,7 +603,11 @@ static int qedi_conn_start(struct iscsi_cls_conn *cls_conn)
goto start_err;
}
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->fw_cleanup_works = 0;
+ qedi_conn->ep_disconnect_starting = false;
+ spin_unlock(&qedi_conn->tmf_work_lock);
+
qedi_conn->abrt_conn = 0;
rval = iscsi_conn_start(cls_conn);
@@ -742,7 +767,7 @@ static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
rc = qedi_send_iscsi_logout(qedi_conn, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
- rc = qedi_iscsi_abort_work(qedi_conn, task);
+ rc = qedi_send_iscsi_tmf(qedi_conn, task);
break;
case ISCSI_OP_TEXT:
rc = qedi_send_iscsi_text(qedi_conn, task);
@@ -772,7 +797,6 @@ static int qedi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
}
cmd->conn = conn->dd_data;
- cmd->scsi_cmd = NULL;
return qedi_iscsi_send_generic_request(task);
}
@@ -783,9 +807,16 @@ static int qedi_task_xmit(struct iscsi_task *task)
struct qedi_cmd *cmd = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ /* Clear now so in cleanup_task we know it didn't make it */
+ cmd->scsi_cmd = NULL;
+ cmd->task_id = U16_MAX;
+
if (test_bit(QEDI_IN_SHUTDOWN, &qedi_conn->qedi->flags))
return -ENODEV;
+ if (test_bit(QEDI_BLOCK_IO, &qedi_conn->qedi->flags))
+ return -EACCES;
+
cmd->state = 0;
cmd->task = NULL;
cmd->use_slowpath = false;
@@ -988,12 +1019,10 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct qedi_endpoint *qedi_ep;
struct qedi_conn *qedi_conn = NULL;
- struct iscsi_conn *conn = NULL;
struct qedi_ctx *qedi;
int ret = 0;
int wait_delay;
int abrt_conn = 0;
- int count = 10;
wait_delay = 60 * HZ + DEF_MAX_RT_TIME;
qedi_ep = ep->dd_data;
@@ -1007,17 +1036,21 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
- conn = qedi_conn->cls_conn->dd_data;
- iscsi_suspend_queue(conn);
abrt_conn = qedi_conn->abrt_conn;
- while (count--) {
- if (!test_bit(QEDI_CONN_FW_CLEANUP,
- &qedi_conn->flags)) {
- break;
- }
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "cid=0x%x qedi_ep=%p waiting for %d tmfs\n",
+ qedi_ep->iscsi_cid, qedi_ep,
+ qedi_conn->fw_cleanup_works);
+
+ spin_lock(&qedi_conn->tmf_work_lock);
+ qedi_conn->ep_disconnect_starting = true;
+ while (qedi_conn->fw_cleanup_works > 0) {
+ spin_unlock(&qedi_conn->tmf_work_lock);
msleep(1000);
+ spin_lock(&qedi_conn->tmf_work_lock);
}
+ spin_unlock(&qedi_conn->tmf_work_lock);
if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
if (qedi_do_not_recover) {
@@ -1383,13 +1416,24 @@ static umode_t qedi_attr_is_visible(int param_type, int param)
static void qedi_cleanup_task(struct iscsi_task *task)
{
- if (!task->sc || task->state == ISCSI_TASK_PENDING) {
+ struct qedi_cmd *cmd;
+
+ if (task->state == ISCSI_TASK_PENDING) {
QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n",
refcount_read(&task->refcount));
return;
}
- qedi_iscsi_unmap_sg_list(task->dd_data);
+ if (task->sc)
+ qedi_iscsi_unmap_sg_list(task->dd_data);
+
+ cmd = task->dd_data;
+ if (cmd->task_id != U16_MAX)
+ qedi_clear_task_idx(iscsi_host_priv(task->conn->session->host),
+ cmd->task_id);
+
+ cmd->task_id = U16_MAX;
+ cmd->scsi_cmd = NULL;
}
struct iscsi_transport qedi_iscsi_transport = {
@@ -1401,6 +1445,7 @@ struct iscsi_transport qedi_iscsi_transport = {
.destroy_session = qedi_session_destroy,
.create_conn = qedi_conn_create,
.bind_conn = qedi_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.start_conn = qedi_conn_start,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qedi_conn_destroy,
@@ -1614,20 +1659,6 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep,
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
-void qedi_clear_session_ctx(struct iscsi_cls_session *cls_sess)
-{
- struct iscsi_session *session = cls_sess->dd_data;
- struct iscsi_conn *conn = session->leadconn;
- struct qedi_conn *qedi_conn = conn->dd_data;
-
- if (iscsi_is_session_online(cls_sess))
- qedi_ep_disconnect(qedi_conn->iscsi_ep);
-
- qedi_conn_destroy(qedi_conn->cls_conn);
-
- qedi_session_destroy(cls_sess);
-}
-
void qedi_process_tcp_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
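The qedi_iscsi.c changes above replace the single QEDI_CONN_FW_CLEANUP flag bit with a fw_cleanup_works counter plus an ep_disconnect_starting flag, both guarded by tmf_work_lock, which qedi_ep_disconnect() now drains in a msleep() loop. A minimal sketch of how such a counter pairs with that drain loop; the helper names below are illustrative only, the real increment/decrement sites live in the TMF paths elsewhere in this series:

static bool qedi_tmf_work_start_example(struct qedi_conn *qedi_conn)
{
	bool allowed;

	spin_lock(&qedi_conn->tmf_work_lock);
	/* Refuse new firmware cleanup work once ep_disconnect is draining. */
	allowed = !qedi_conn->ep_disconnect_starting;
	if (allowed)
		qedi_conn->fw_cleanup_works++;
	spin_unlock(&qedi_conn->tmf_work_lock);

	return allowed;
}

static void qedi_tmf_work_done_example(struct qedi_conn *qedi_conn)
{
	spin_lock(&qedi_conn->tmf_work_lock);
	/* Lets the polling loop in qedi_ep_disconnect() above make progress. */
	qedi_conn->fw_cleanup_works--;
	spin_unlock(&qedi_conn->tmf_work_lock);
}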
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 39dc27c85e3c..758735209e15 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -169,8 +169,8 @@ struct qedi_conn {
struct list_head tmf_work_list;
wait_queue_head_t wait_queue;
spinlock_t tmf_work_lock; /* tmf work lock */
- unsigned long flags;
-#define QEDI_CONN_FW_CLEANUP 1
+ bool ep_disconnect_starting;
+ int fw_cleanup_works;
};
struct qedi_cmd {
@@ -212,6 +212,7 @@ struct qedi_cmd {
struct qedi_work_map {
struct list_head list;
struct qedi_cmd *qedi_cmd;
+ struct iscsi_task *ctask;
int rtid;
int state;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 2455d1448a7e..0b0acb827071 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -640,7 +640,7 @@ static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
goto exit_setup_shost;
}
- shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
+ shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA - 1;
shost->max_channel = 0;
shost->max_lun = ~0;
shost->max_cmd_len = 16;
@@ -2417,11 +2417,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
int rval;
u16 retry = 10;
- if (mode == QEDI_MODE_SHUTDOWN)
- iscsi_host_for_each_session(qedi->shost,
- qedi_clear_session_ctx);
-
if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
+ iscsi_host_remove(qedi->shost);
+
if (qedi->tmf_thread) {
flush_workqueue(qedi->tmf_thread);
destroy_workqueue(qedi->tmf_thread);
@@ -2482,7 +2480,6 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
if (qedi->boot_kset)
iscsi_boot_destroy_kset(qedi->boot_kset);
- iscsi_host_remove(qedi->shost);
iscsi_host_free(qedi->shost);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fae5cae6f0a8..418be9a2fcf6 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -173,7 +173,6 @@ extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
-extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
extern int ql2xenforce_iocb_limit;
@@ -220,7 +219,6 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
-extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
@@ -687,8 +685,6 @@ extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern void qla2x00_free_fcport(fc_port_t *);
-
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0de250570e39..eb825318e3f5 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4356,8 +4356,6 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
if (IS_QLAFX00(vha->hw))
return qlafx00_fw_ready(vha);
- rval = QLA_SUCCESS;
-
/* Time to wait for loop down */
if (IS_P3P_TYPE(ha))
min_wait = 30;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6e8f737a4af3..19fe2c1659d0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2694,31 +2694,22 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
/* check guard */
if (e_guard != a_guard) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x1);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
/* check ref tag */
if (e_ref_tag != a_ref_tag) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x3);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
/* check appl tag */
if (e_app_tag != a_app_tag) {
- scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
- 0x10, 0x2);
- set_driver_byte(cmd, DRIVER_SENSE);
+ scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
set_host_byte(cmd, DID_ABORT);
- cmd->result |= SAM_STAT_CHECK_CONDITION;
return 1;
}
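The three qla2xxx DIF hunks above are representative of the whole series: the open-coded scsi_build_sense_buffer() + DRIVER_SENSE + SAM_STAT_CHECK_CONDITION sequence collapses into the new scsi_build_sense() helper, added to scsi_lib.c further down. A condensed before/after sketch of that pattern:

/* before */
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 0x10, 0x1);
set_driver_byte(cmd, DRIVER_SENSE);
set_host_byte(cmd, DID_ABORT);
cmd->result |= SAM_STAT_CHECK_CONDITION;

/* after: sense data and SAM_STAT_CHECK_CONDITION are set in one call,
 * the driver byte is gone, and the host byte is still set separately */
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
set_host_byte(cmd, DID_ABORT);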
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 0cacb667a88b..e119f8b24e33 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -671,7 +671,7 @@ void qla_nvme_unregister_remote_port(struct fc_port *fcport)
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
- ql_log(ql_log_warn, NULL, 0x2112,
+ ql_log(ql_log_warn, fcport->vha, 0x2112,
"%s: unregister remoteport on %p %8phN\n",
__func__, fcport, fcport->port_name);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 12a6848ade43..eb47140a899f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5481,8 +5481,7 @@ qlt_free_qfull_cmds(struct qla_qpair *qpair)
"%s: Unexpected cmd in QFull list %p\n", __func__,
cmd);
- list_del(&cmd->cmd_list);
- list_add_tail(&cmd->cmd_list, &free_list);
+ list_move_tail(&cmd->cmd_list, &free_list);
/* piggy back on hardware_lock for protection */
vha->hw->tgt.num_qfull_cmds_alloc--;
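The qla_target.c hunk replaces the manual delete-and-re-add with list_move_tail(), which expresses the same operation in one call; in include/linux/list.h it is roughly:

static inline void list_move_tail(struct list_head *list, struct list_head *head)
{
	__list_del_entry(list);	/* unlink from the current list */
	list_add_tail(list, head);	/* append to the destination list */
}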
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 5f56122f6664..db41d90a5b6e 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -472,8 +472,7 @@ int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
} else if (device_map[i].device_type == ISCSI_CLASS) {
if (drv_active & (1 << device_map[i].func_num)) {
if (!iscsi_present ||
- (iscsi_present &&
- (iscsi_func_low > device_map[i].func_num)))
+ iscsi_func_low > device_map[i].func_num)
iscsi_func_low = device_map[i].func_num;
iscsi_present++;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ad3afe30f617..6ee7ea4c27e0 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -259,6 +259,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.start_conn = qla4xxx_conn_start,
.create_conn = qla4xxx_conn_create,
.bind_conn = qla4xxx_conn_bind,
+ .unbind_conn = iscsi_conn_unbind,
.stop_conn = iscsi_conn_stop,
.destroy_conn = qla4xxx_conn_destroy,
.set_param = iscsi_set_param,
@@ -814,8 +815,6 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
valid_chap_entries++;
if (valid_chap_entries == *num_entries)
break;
- else
- continue;
}
mutex_unlock(&ha->chap_sem);
@@ -3234,6 +3233,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
qla_conn = conn->dd_data;
qla_conn->qla_ep = ep->dd_data;
+ iscsi_put_endpoint(ep);
return 0;
}
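Both qedi and qla4xxx gain a .unbind_conn callback pointing at the common iscsi_conn_unbind helper; the transport class now warns when a transport implements ep_disconnect without it (see the iscsi_register_transport() hunk further down). Schematically, for an offload transport (unrelated callbacks elided):

static struct iscsi_transport example_offload_transport = {
	/* ... */
	.bind_conn	= qla4xxx_conn_bind,
	.unbind_conn	= iscsi_conn_unbind,	/* undoes the bind during conn cleanup */
	/* .ep_connect / .ep_disconnect as provided by the driver */
};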
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 136681ad18a5..3bbe0b5545d9 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -4,9 +4,9 @@
Use at your own risk. Support Tort Reform so you won't have to read all
these silly disclaimers.
- Copyright 1994, Tom Zerucha.
+ Copyright 1994, Tom Zerucha.
tz@execpc.com
-
+
Additional Code, and much appreciated help by
Michael A. Griffith
grif@cs.ucr.edu
@@ -22,12 +22,12 @@
Functions as standalone, loadable, and PCMCIA driver, the latter from
Dave Hinds' PCMCIA package.
-
+
Cleaned up 26/10/2002 by Alan Cox <alan@lxorguk.ukuu.org.uk> as part of the 2.5
SCSI driver cleanup and audit. This driver still needs work on the
following
- - Non terminating hardware waits
- - Some layering violations with its pcmcia stub
+ - Non terminating hardware waits
+ - Some layering violations with its pcmcia stub
Redistributable under terms of the GNU General Public License
@@ -92,8 +92,9 @@ static void ql_zap(struct qlogicfas408_priv *priv)
/*
* Do a pseudo-dma tranfer
*/
-
-static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int reqlen)
+
+static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request,
+ int reqlen)
{
int j;
int qbase = priv->qbase;
@@ -108,7 +109,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
request += 128;
}
while (reqlen >= 84 && !(j & 0xc0)) /* 2/3 */
- if ((j = inb(qbase + 8)) & 4)
+ if ((j = inb(qbase + 8)) & 4)
{
insl(qbase + 4, request, 21);
reqlen -= 84;
@@ -123,11 +124,11 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
/* until both empty and int (or until reclen is 0) */
rtrc(7)
j = 0;
- while (reqlen && !((j & 0x10) && (j & 0xc0)))
+ while (reqlen && !((j & 0x10) && (j & 0xc0)))
{
/* while bytes to receive and not empty */
j &= 0xc0;
- while (reqlen && !((j = inb(qbase + 8)) & 0x10))
+ while (reqlen && !((j = inb(qbase + 8)) & 0x10))
{
*request++ = inb(qbase + 4);
reqlen--;
@@ -161,7 +162,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
j = 0;
while (reqlen && !((j & 2) && (j & 0xc0))) {
/* while bytes to send and not full */
- while (reqlen && !((j = inb(qbase + 8)) & 2))
+ while (reqlen && !((j = inb(qbase + 8)) & 2))
{
outb(*request++, qbase + 4);
reqlen--;
@@ -175,7 +176,7 @@ static int ql_pdma(struct qlogicfas408_priv *priv, int phase, char *request, int
}
/*
- * Wait for interrupt flag (polled - not real hardware interrupt)
+ * Wait for interrupt flag (polled - not real hardware interrupt)
*/
static int ql_wai(struct qlogicfas408_priv *priv)
@@ -205,14 +206,14 @@ static int ql_wai(struct qlogicfas408_priv *priv)
}
/*
- * Initiate scsi command - queueing handler
+ * Initiate scsi command - queueing handler
* caller must hold host lock
*/
static void ql_icmd(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
- int qbase = priv->qbase;
+ int qbase = priv->qbase;
int int_type = priv->int_type;
unsigned int i;
@@ -253,14 +254,13 @@ static void ql_icmd(struct scsi_cmnd *cmd)
}
/*
- * Process scsi command - usually after interrupt
+ * Process scsi command - usually after interrupt
*/
-static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
+static void ql_pcmd(struct scsi_cmnd *cmd)
{
unsigned int i, j;
unsigned long k;
- unsigned int result; /* ultimate return result */
unsigned int status; /* scsi returned status */
unsigned int message; /* scsi returned message */
unsigned int phase; /* recorded scsi phase */
@@ -274,13 +274,15 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
j = inb(qbase + 6);
i = inb(qbase + 5);
if (i == 0x20) {
- return (DID_NO_CONNECT << 16);
+ set_host_byte(cmd, DID_NO_CONNECT);
+ return;
}
i |= inb(qbase + 5); /* the 0x10 bit can be set after the 0x08 */
if (i != 0x18) {
printk(KERN_ERR "Ql:Bad Interrupt status:%02x\n", i);
ql_zap(priv);
- return (DID_BAD_INTR << 16);
+ set_host_byte(cmd, DID_BAD_INTR);
+ return;
}
j &= 7; /* j = inb( qbase + 7 ) >> 5; */
@@ -293,9 +295,10 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
printk(KERN_ERR "Ql:Bad sequence for command %d, int %02X, cmdleft = %d\n",
j, i, inb(qbase + 7) & 0x1f);
ql_zap(priv);
- return (DID_ERROR << 16);
+ set_host_byte(cmd, DID_ERROR);
+ return;
}
- result = DID_OK;
+
if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
outb(1, qbase + 3); /* clear fifo */
/* note that request_bufflen is the total xfer size when sg is used */
@@ -314,28 +317,31 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
if (priv->qabort) {
REG0;
- return ((priv->qabort == 1 ?
- DID_ABORT : DID_RESET) << 16);
+ set_host_byte(cmd,
+ priv->qabort == 1 ?
+ DID_ABORT : DID_RESET);
}
buf = sg_virt(sg);
if (ql_pdma(priv, phase, buf, sg->length))
break;
}
REG0;
- rtrc(2)
+ rtrc(2);
/*
* Wait for irq (split into second state of irq handler
- * if this can take time)
+ * if this can take time)
*/
- if ((k = ql_wai(priv)))
- return (k << 16);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
k = inb(qbase + 5); /* should be 0x10, bus service */
}
/*
- * Enter Status (and Message In) Phase
+ * Enter Status (and Message In) Phase
*/
-
+
k = jiffies + WATCHDOG;
while (time_before(jiffies, k) && !priv->qabort &&
@@ -344,57 +350,72 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
if (time_after_eq(jiffies, k)) {
ql_zap(priv);
- return (DID_TIME_OUT << 16);
+ set_host_byte(cmd, DID_TIME_OUT);
+ return;
}
/* FIXME: timeout ?? */
while (inb(qbase + 5))
cpu_relax(); /* clear pending ints */
- if (priv->qabort)
- return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
+ if (priv->qabort) {
+ set_host_byte(cmd,
+ priv->qabort == 1 ? DID_ABORT : DID_RESET);
+ return;
+ }
outb(0x11, qbase + 3); /* get status and message */
- if ((k = ql_wai(priv)))
- return (k << 16);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
i = inb(qbase + 5); /* get chip irq stat */
j = inb(qbase + 7) & 0x1f; /* and bytes rec'd */
status = inb(qbase + 2);
message = inb(qbase + 2);
/*
- * Should get function complete int if Status and message, else
- * bus serv if only status
+ * Should get function complete int if Status and message, else
+ * bus serv if only status
*/
if (!((i == 8 && j == 2) || (i == 0x10 && j == 1))) {
printk(KERN_ERR "Ql:Error during status phase, int=%02X, %d bytes recd\n", i, j);
- result = DID_ERROR;
+ set_host_byte(cmd, DID_ERROR);
}
outb(0x12, qbase + 3); /* done, disconnect */
- rtrc(1)
- if ((k = ql_wai(priv)))
- return (k << 16);
+ rtrc(1);
+ if ((k = ql_wai(priv))) {
+ set_host_byte(cmd, k);
+ return;
+ }
/*
- * Should get bus service interrupt and disconnect interrupt
+ * Should get bus service interrupt and disconnect interrupt
*/
-
+
i = inb(qbase + 5); /* should be bus service */
while (!priv->qabort && ((i & 0x20) != 0x20)) {
barrier();
cpu_relax();
i |= inb(qbase + 5);
}
- rtrc(0)
+ rtrc(0);
- if (priv->qabort)
- return ((priv->qabort == 1 ? DID_ABORT : DID_RESET) << 16);
-
- return (result << 16) | (message << 8) | (status & STATUS_MASK);
+ if (priv->qabort) {
+ set_host_byte(cmd,
+ priv->qabort == 1 ? DID_ABORT : DID_RESET);
+ return;
+ }
+
+ set_host_byte(cmd, DID_OK);
+ if (message != COMMAND_COMPLETE)
+ scsi_msg_to_host_byte(cmd, message);
+ set_status_byte(cmd, status);
+ return;
}
/*
- * Interrupt handler
+ * Interrupt handler
*/
static void ql_ihandl(void *dev_id)
@@ -415,11 +436,11 @@ static void ql_ihandl(void *dev_id)
return;
}
icmd = priv->qlcmd;
- icmd->result = ql_pcmd(icmd);
+ ql_pcmd(icmd);
priv->qlcmd = NULL;
/*
- * If result is CHECK CONDITION done calls qcommand to request
- * sense
+ * If result is CHECK CONDITION done calls qcommand to request
+ * sense
*/
(icmd->scsi_done) (icmd);
}
@@ -443,8 +464,11 @@ static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
void (*done) (struct scsi_cmnd *))
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
+
+ set_host_byte(cmd, DID_OK);
+ set_status_byte(cmd, SAM_STAT_GOOD);
if (scmd_id(cmd) == priv->qinitid) {
- cmd->result = DID_BAD_TARGET << 16;
+ set_host_byte(cmd, DID_BAD_TARGET);
done(cmd);
return 0;
}
@@ -461,8 +485,8 @@ static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd,
DEF_SCSI_QCMD(qlogicfas408_queuecommand)
-/*
- * Return bios parameters
+/*
+ * Return bios parameters
*/
int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
@@ -487,7 +511,7 @@ int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
/*
* Abort a command in progress
*/
-
+
int qlogicfas408_abort(struct scsi_cmnd *cmd)
{
struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
@@ -566,9 +590,9 @@ void qlogicfas408_setup(int qbase, int id, int int_type)
int qlogicfas408_detect(int qbase, int int_type)
{
- REG1;
+ REG1;
return (((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7) &&
- ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
+ ((inb(qbase + 0xe) ^ inb(qbase + 0xe)) == 7));
}
/*
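The qlogicfas408 conversion is the template for the result-reporting change: ql_pcmd() no longer returns a packed (host << 16) | (message << 8) | status word for the caller to stuff into cmd->result; it writes the outcome into the command through the accessors. A rough sketch of the completion path in the new style, using the names from the hunks above:

/* success path */
set_host_byte(cmd, DID_OK);
if (message != COMMAND_COMPLETE)
	scsi_msg_to_host_byte(cmd, message);	/* fold a bad message into the host byte */
set_status_byte(cmd, status);

/* error paths pick the matching host byte instead of returning a code */
set_host_byte(cmd, DID_TIME_OUT);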
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e9e2f0e15ac8..d26025cf5de3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -144,7 +144,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
(level > 1)) {
scsi_print_result(cmd, "Done", disposition);
scsi_print_command(cmd);
- if (status_byte(cmd->result) == CHECK_CONDITION)
+ if (scsi_status_is_check_condition(cmd->result))
scsi_print_sense(cmd);
if (level > 3)
scmd_printk(KERN_INFO, cmd,
@@ -185,13 +185,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
if (atomic_read(&sdev->device_blocked))
atomic_set(&sdev->device_blocked, 0);
- /*
- * If we have valid sense information, then some kind of recovery
- * must have taken place. Make a note of this.
- */
- if (SCSI_SENSE_VALID(cmd))
- cmd->result |= (DRIVER_SENSE << 24);
-
SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
"Notifying upper driver of completion "
"(result %x)\n", cmd->result));
@@ -508,6 +501,8 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
&sshdr, 30 * HZ, 3, NULL);
+ if (result < 0)
+ return result;
if (result && scsi_sense_valid(&sshdr) &&
sshdr.sense_key == ILLEGAL_REQUEST &&
(sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
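__scsi_execute() (see the scsi_lib.c hunk below) now returns a negative errno when the request cannot even be submitted, so scsi_report_opcode() and the other callers touched in this series check for that before interpreting the value as a SCSI status. Caller-side sketch:

result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
			  &sshdr, 30 * HZ, 3, NULL);
if (result < 0)
	return result;	/* submission failure, no SCSI status or sense to inspect */
/* result >= 0: SCSI status word; sense data, if any, is in sshdr */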
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a5d1633b5bd8..5b3a20a140f9 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -851,10 +851,10 @@ static struct device_driver sdebug_driverfs_driver = {
};
static const int check_condition_result =
- (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ SAM_STAT_CHECK_CONDITION;
static const int illegal_condition_result =
- (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+ (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
static const int device_qfull_result =
(DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
@@ -931,7 +931,7 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
}
asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
+ scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
memset(sks, 0, sizeof(sks));
sks[0] = 0x80;
if (c_d)
@@ -957,17 +957,14 @@ static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
- unsigned char *sbuff;
-
- sbuff = scp->sense_buffer;
- if (!sbuff) {
+ if (!scp->sense_buffer) {
sdev_printk(KERN_ERR, scp->device,
"%s: sense_buffer is NULL\n", __func__);
return;
}
- memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
+ memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
+ scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
if (sdebug_verbose)
sdev_printk(KERN_INFO, scp->device,
@@ -7684,11 +7681,6 @@ static int sdebug_driver_remove(struct device *dev)
sdbg_host = to_sdebug_host(dev);
- if (!sdbg_host) {
- pr_err("Unable to locate host info\n");
- return -ENODEV;
- }
-
scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d8fafe77dbbe..c6cd5a8e5c85 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -742,41 +742,35 @@ static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
return FAILED;
/*
- * next, check the message byte.
- */
- if (msg_byte(scmd->result) != COMMAND_COMPLETE)
- return FAILED;
-
- /*
* now, check the status byte to see if this indicates
* anything special.
*/
- switch (status_byte(scmd->result)) {
- case GOOD:
+ switch (get_status_byte(scmd)) {
+ case SAM_STAT_GOOD:
scsi_handle_queue_ramp_up(scmd->device);
fallthrough;
- case COMMAND_TERMINATED:
+ case SAM_STAT_COMMAND_TERMINATED:
return SUCCESS;
- case CHECK_CONDITION:
+ case SAM_STAT_CHECK_CONDITION:
return scsi_check_sense(scmd);
- case CONDITION_GOOD:
- case INTERMEDIATE_GOOD:
- case INTERMEDIATE_C_GOOD:
+ case SAM_STAT_CONDITION_MET:
+ case SAM_STAT_INTERMEDIATE:
+ case SAM_STAT_INTERMEDIATE_CONDITION_MET:
/*
* who knows? FIXME(eric)
*/
return SUCCESS;
- case RESERVATION_CONFLICT:
+ case SAM_STAT_RESERVATION_CONFLICT:
if (scmd->cmnd[0] == TEST_UNIT_READY)
/* it is a success, we probed the device and
* found it */
return SUCCESS;
/* otherwise, we failed to send the command */
return FAILED;
- case QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
scsi_handle_queue_full(scmd->device);
fallthrough;
- case BUSY:
+ case SAM_STAT_BUSY:
return NEEDS_RETRY;
default:
return FAILED;
@@ -1258,7 +1252,7 @@ int scsi_eh_get_sense(struct list_head *work_q,
current->comm));
break;
}
- if (status_byte(scmd->result) != CHECK_CONDITION)
+ if (!scsi_status_is_check_condition(scmd->result))
/*
* don't request sense if there's no check condition
* status because the error we're processing isn't one
@@ -1766,15 +1760,14 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
case DID_PARITY:
return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
case DID_ERROR:
- if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
- status_byte(scmd->result) == RESERVATION_CONFLICT)
+ if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
return 0;
fallthrough;
case DID_SOFT_ERROR:
return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
}
- if (status_byte(scmd->result) != CHECK_CONDITION)
+ if (!scsi_status_is_check_condition(scmd->result))
return 0;
check_type:
@@ -1883,8 +1876,7 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
*/
return SUCCESS;
case DID_ERROR:
- if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
- status_byte(scmd->result) == RESERVATION_CONFLICT)
+ if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
/*
* execute reservation conflict processing code
* lower down
@@ -1913,23 +1905,17 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
}
/*
- * next, check the message byte.
- */
- if (msg_byte(scmd->result) != COMMAND_COMPLETE)
- return FAILED;
-
- /*
* check the status byte to see if this indicates anything special.
*/
- switch (status_byte(scmd->result)) {
- case QUEUE_FULL:
+ switch (get_status_byte(scmd)) {
+ case SAM_STAT_TASK_SET_FULL:
scsi_handle_queue_full(scmd->device);
/*
* the case of trying to send too many commands to a
* tagged queueing device.
*/
fallthrough;
- case BUSY:
+ case SAM_STAT_BUSY:
/*
* device can't talk to us at the moment. Should only
* occur (SAM-3) when the task queue is empty, so will cause
@@ -1937,16 +1923,16 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
* device.
*/
return ADD_TO_MLQUEUE;
- case GOOD:
+ case SAM_STAT_GOOD:
if (scmd->cmnd[0] == REPORT_LUNS)
scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
fallthrough;
- case COMMAND_TERMINATED:
+ case SAM_STAT_COMMAND_TERMINATED:
return SUCCESS;
- case TASK_ABORTED:
+ case SAM_STAT_TASK_ABORTED:
goto maybe_retry;
- case CHECK_CONDITION:
+ case SAM_STAT_CHECK_CONDITION:
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
@@ -1955,16 +1941,16 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
* to collect the sense and redo the decide
* disposition */
return rtn;
- case CONDITION_GOOD:
- case INTERMEDIATE_GOOD:
- case INTERMEDIATE_C_GOOD:
- case ACA_ACTIVE:
+ case SAM_STAT_CONDITION_MET:
+ case SAM_STAT_INTERMEDIATE:
+ case SAM_STAT_INTERMEDIATE_CONDITION_MET:
+ case SAM_STAT_ACA_ACTIVE:
/*
* who knows? FIXME(eric)
*/
return SUCCESS;
- case RESERVATION_CONFLICT:
+ case SAM_STAT_RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
set_host_byte(scmd, DID_NEXUS_FAILURE);
@@ -2137,10 +2123,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
/*
* If just we got sense for the device (called
* scsi_eh_get_sense), scmd->result is already
- * set, do not set DRIVER_TIMEOUT.
+ * set, do not set DID_TIME_OUT.
*/
if (!scmd->result)
- scmd->result |= (DRIVER_TIMEOUT << 24);
+ scmd->result |= (DID_TIME_OUT << 16);
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush finish cmd\n",
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 14872c9dc78c..0d13610cd6bf 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -101,8 +101,9 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
- if (driver_byte(result) == DRIVER_SENSE &&
- scsi_sense_valid(&sshdr)) {
+ if (result < 0)
+ goto out;
+ if (scsi_sense_valid(&sshdr)) {
switch (sshdr.sense_key) {
case ILLEGAL_REQUEST:
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
@@ -133,7 +134,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
break;
}
}
-
+out:
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"IOCTL Releasing command\n"));
return result;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 532304d42f00..6b994baf87c2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -211,20 +211,23 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
{
struct request *req;
struct scsi_request *rq;
- int ret = DRIVER_ERROR << 24;
+ int ret;
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
- return ret;
- rq = scsi_req(req);
+ return PTR_ERR(req);
- if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
- buffer, bufflen, GFP_NOIO))
- goto out;
+ rq = scsi_req(req);
+ if (bufflen) {
+ ret = blk_rq_map_kern(sdev->request_queue, req,
+ buffer, bufflen, GFP_NOIO);
+ if (ret)
+ goto out;
+ }
rq->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(rq->cmd, cmd, rq->cmd_len);
rq->retries = retries;
@@ -588,12 +591,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
switch (host_byte(result)) {
case DID_OK:
- /*
- * Also check the other bytes than the status byte in result
- * to handle the case when a SCSI LLD sets result to
- * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
- */
- if (scsi_status_is_good(result) && (result & ~0xff) == 0)
+ if (scsi_status_is_good(result))
return BLK_STS_OK;
return BLK_STS_IOERR;
case DID_TRANSPORT_FAILFAST:
@@ -787,7 +785,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
*/
if (!level && __ratelimit(&_rs)) {
scsi_print_result(cmd, NULL, FAILED);
- if (driver_byte(result) == DRIVER_SENSE)
+ if (sense_valid)
scsi_print_sense(cmd);
scsi_print_command(cmd);
}
@@ -875,7 +873,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
* intermediate statuses (both obsolete in SAM-4) as good.
*/
- if (status_byte(result) && scsi_status_is_good(result)) {
+ if ((result & 0xff) && scsi_status_is_good(result)) {
result = 0;
*blk_statp = BLK_STS_OK;
}
@@ -2093,9 +2091,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
* @sshdr: place to put sense data (or NULL if no sense to be collected).
* must be SCSI_SENSE_BUFFERSIZE big.
*
- * Returns zero if unsuccessful, or the header offset (either 4
- * or 8 depending on whether a six or ten byte command was
- * issued) if successful.
+ * Returns zero if successful, or a negative error number on failure
*/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
@@ -2142,58 +2138,60 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
sshdr, timeout, retries, NULL);
+ if (result < 0)
+ return result;
/* This code looks awful: what it's doing is making sure an
* ILLEGAL REQUEST sense return identifies the actual command
* byte as the problem. MODE_SENSE commands can return
* ILLEGAL REQUEST if the code page isn't supported */
- if (use_10_for_ms && !scsi_status_is_good(result) &&
- driver_byte(result) == DRIVER_SENSE) {
+ if (!scsi_status_is_good(result)) {
if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
/*
* Invalid command operation code
*/
- sdev->use_10_for_ms = 0;
+ if (use_10_for_ms) {
+ sdev->use_10_for_ms = 0;
+ goto retry;
+ }
+ }
+ if (scsi_status_is_check_condition(result) &&
+ sshdr->sense_key == UNIT_ATTENTION &&
+ retry_count) {
+ retry_count--;
goto retry;
}
}
+ return -EIO;
+ }
+ if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
+ (modepage == 6 || modepage == 8))) {
+ /* Initio breakage? */
+ header_length = 0;
+ data->length = 13;
+ data->medium_type = 0;
+ data->device_specific = 0;
+ data->longlba = 0;
+ data->block_descriptor_length = 0;
+ } else if (use_10_for_ms) {
+ data->length = buffer[0]*256 + buffer[1] + 2;
+ data->medium_type = buffer[2];
+ data->device_specific = buffer[3];
+ data->longlba = buffer[4] & 0x01;
+ data->block_descriptor_length = buffer[6]*256
+ + buffer[7];
+ } else {
+ data->length = buffer[0] + 1;
+ data->medium_type = buffer[1];
+ data->device_specific = buffer[2];
+ data->block_descriptor_length = buffer[3];
}
+ data->header_length = header_length;
- if (scsi_status_is_good(result)) {
- if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
- (modepage == 6 || modepage == 8))) {
- /* Initio breakage? */
- header_length = 0;
- data->length = 13;
- data->medium_type = 0;
- data->device_specific = 0;
- data->longlba = 0;
- data->block_descriptor_length = 0;
- } else if (use_10_for_ms) {
- data->length = buffer[0]*256 + buffer[1] + 2;
- data->medium_type = buffer[2];
- data->device_specific = buffer[3];
- data->longlba = buffer[4] & 0x01;
- data->block_descriptor_length = buffer[6]*256
- + buffer[7];
- } else {
- data->length = buffer[0] + 1;
- data->medium_type = buffer[1];
- data->device_specific = buffer[2];
- data->block_descriptor_length = buffer[3];
- }
- data->header_length = header_length;
- } else if ((status_byte(result) == CHECK_CONDITION) &&
- scsi_sense_valid(sshdr) &&
- sshdr->sense_key == UNIT_ATTENTION && retry_count) {
- retry_count--;
- goto retry;
- }
-
- return result;
+ return 0;
}
EXPORT_SYMBOL(scsi_mode_sense);
@@ -3218,3 +3216,20 @@ int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
+
+/**
+ * scsi_build_sense - build sense data for a command
+ * @scmd: scsi command for which the sense should be formatted
+ * @desc: Sense format (non-zero == descriptor format,
+ * 0 == fixed format)
+ * @key: Sense key
+ * @asc: Additional sense code
+ * @ascq: Additional sense code qualifier
+ *
+ **/
+void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
+{
+ scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
+ scmd->result = SAM_STAT_CHECK_CONDITION;
+}
+EXPORT_SYMBOL_GPL(scsi_build_sense);
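scsi_mode_sense() now has a caller-friendly contract: 0 on success, negative errno on any failure, with the retry on UNIT ATTENTION and the 10-to-6-byte CDB fallback handled internally. Callers drop their scsi_status_is_good() checks and simply test the return value, as sas_read_port_mode_page() does below:

error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30 * HZ, 3,
			&mode_data, NULL);
if (error)	/* 0 on success, negative errno on failure */
	goto out;
/* mode_data.header_length etc. are only valid on success */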
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index 8ea44c6595ef..2317717935e9 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -385,7 +385,6 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
size_t off, logbuf_len;
const char *mlret_string = scsi_mlreturn_string(disposition);
const char *hb_string = scsi_hostbyte_string(cmd->result);
- const char *db_string = scsi_driverbyte_string(cmd->result);
unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
@@ -426,13 +425,8 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (WARN_ON(off >= logbuf_len))
goto out_printk;
- if (db_string)
- off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=%s ", db_string);
- else
- off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=0x%02x ",
- driver_byte(cmd->result));
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "driverbyte=DRIVER_OK ");
off += scnprintf(logbuf + off, logbuf_len - off,
"cmd_age=%lus", cmd_age);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 12f54571b83e..5ce45ef9808f 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -616,14 +616,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
"scsi scan: INQUIRY %s with code 0x%x\n",
result ? "failed" : "successful", result));
- if (result) {
+ if (result > 0) {
/*
* not-ready to ready transition [asc/ascq=0x28/0x0]
* or power-on, reset [asc/ascq=0x29/0x0], continue.
* INQUIRY should not yield UNIT_ATTENTION
* but many buggy devices do so anyway.
*/
- if (driver_byte(result) == DRIVER_SENSE &&
+ if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
if ((sshdr.sense_key == UNIT_ATTENTION) &&
((sshdr.asc == 0x28) ||
@@ -631,7 +631,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
(sshdr.ascq == 0))
continue;
}
- } else {
+ } else if (result == 0) {
/*
* if nothing was transferred, we try
* again. It's a workaround for some USB
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index da5b503dc7a1..49748cd817a5 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1686,7 +1686,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return -EBUSY;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 441f0152193f..b07105ae7c91 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,16 +86,10 @@ struct iscsi_internal {
struct transport_container session_cont;
};
-/* Worker to perform connection failure on unresponsive connections
- * completely in kernel space.
- */
-static void stop_conn_work_fn(struct work_struct *work);
-static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
-
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_eh_timer_workq;
-static struct workqueue_struct *iscsi_destroy_workq;
+static struct workqueue_struct *iscsi_conn_cleanup_workq;
static DEFINE_IDA(iscsi_sess_ida);
/*
@@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
}
EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+void iscsi_put_endpoint(struct iscsi_endpoint *ep)
+{
+ put_device(&ep->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
+
+/**
+ * iscsi_lookup_endpoint - get ep from handle
+ * @handle: endpoint handle
+ *
+ * Caller must do a iscsi_put_endpoint.
+ */
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct iscsi_endpoint *ep;
struct device *dev;
dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
@@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
if (!dev)
return NULL;
- ep = iscsi_dev_to_endpoint(dev);
- /*
- * we can drop this now because the interface will prevent
- * removals and lookups from racing.
- */
- put_device(dev);
- return ep;
+ return iscsi_dev_to_endpoint(dev);
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
@@ -1620,12 +1619,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
-/*
- * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
- * against the kernel stop_connection recovery mechanism
- */
-static DEFINE_MUTEX(conn_mutex);
-
static LIST_HEAD(sesslist);
static DEFINE_SPINLOCK(sesslock);
static LIST_HEAD(connlist);
@@ -1976,6 +1969,8 @@ static void __iscsi_unblock_session(struct work_struct *work)
*/
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
+ flush_work(&session->block_work);
+
queue_work(iscsi_eh_timer_workq, &session->unblock_work);
/*
* Blocking the session can be done from any context so we only
@@ -2242,6 +2237,123 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
}
EXPORT_SYMBOL_GPL(iscsi_remove_session);
+static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n");
+
+ switch (flag) {
+ case STOP_CONN_RECOVER:
+ conn->state = ISCSI_CONN_FAILED;
+ break;
+ case STOP_CONN_TERM:
+ conn->state = ISCSI_CONN_DOWN;
+ break;
+ default:
+ iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
+ flag);
+ return;
+ }
+
+ conn->transport->stop_conn(conn, flag);
+ ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
+}
+
+static int iscsi_if_stop_conn(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+{
+ int flag = ev->u.stop_conn.flag;
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
+ /*
+ * If this is a termination we have to call stop_conn with that flag
+ * so the correct states get set. If we haven't run the work yet try to
+ * avoid the extra run.
+ */
+ if (flag == STOP_CONN_TERM) {
+ cancel_work_sync(&conn->cleanup_work);
+ iscsi_stop_conn(conn, flag);
+ } else {
+ /*
+ * Figure out if it was the kernel or userspace initiating this.
+ */
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ iscsi_stop_conn(conn, flag);
+ } else {
+ ISCSI_DBG_TRANS_CONN(conn,
+ "flush kernel conn cleanup.\n");
+ flush_work(&conn->cleanup_work);
+ }
+ /*
+ * Only clear for recovery to avoid extra cleanup runs during
+ * termination.
+ */
+ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ }
+ ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
+ return 0;
+}
+
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ conn->state = ISCSI_CONN_FAILED;
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
+{
+ struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
+ cleanup_work);
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+
+ mutex_lock(&conn->ep_mutex);
+ /*
+ * If we are not at least bound there is nothing for us to do. Userspace
+ * will do a ep_disconnect call if offload is used, but will not be
+ * doing a stop since there is nothing to clean up, so we have to clear
+ * the cleanup bit here.
+ */
+ if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
+ ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
+ clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ mutex_unlock(&conn->ep_mutex);
+ return;
+ }
+
+ iscsi_ep_disconnect(conn, false);
+
+ if (system_state != SYSTEM_RUNNING) {
+ /*
+ * If the user has set up for the session to never timeout
+ * then hang like they wanted. For all other cases fail right
+ * away since userspace is not going to relogin.
+ */
+ if (session->recovery_tmo > 0)
+ session->recovery_tmo = 0;
+ }
+
+ iscsi_stop_conn(conn, STOP_CONN_RECOVER);
+ mutex_unlock(&conn->ep_mutex);
+ ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
+}
+
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -2281,7 +2393,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
- INIT_LIST_HEAD(&conn->conn_list_err);
+ INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
conn->state = ISCSI_CONN_DOWN;
@@ -2338,7 +2450,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
spin_lock_irqsave(&connlock, flags);
list_del(&conn->conn_list);
- list_del(&conn->conn_list_err);
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
@@ -2348,6 +2459,18 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
}
EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+void iscsi_put_conn(struct iscsi_cls_conn *conn)
+{
+ put_device(&conn->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_put_conn);
+
+void iscsi_get_conn(struct iscsi_cls_conn *conn)
+{
+ get_device(&conn->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_get_conn);
+
/*
* iscsi interface functions
*/
@@ -2453,77 +2576,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
-/*
- * This can be called without the rx_queue_mutex, if invoked by the kernel
- * stop work. But, in that case, it is guaranteed not to race with
- * iscsi_destroy by conn_mutex.
- */
-static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
-{
- /*
- * It is important that this path doesn't rely on
- * rx_queue_mutex, otherwise, a thread doing allocation on a
- * start_session/start_connection could sleep waiting on a
- * writeback to a failed iscsi device, that cannot be recovered
- * because the lock is held. If we don't hold it here, the
- * kernel stop_conn_work_fn has a chance to stop the broken
- * session and resolve the allocation.
- *
- * Still, the user invoked .stop_conn() needs to be serialized
- * with stop_conn_work_fn by a private mutex. Not pretty, but
- * it works.
- */
- mutex_lock(&conn_mutex);
- switch (flag) {
- case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
- break;
- case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
- break;
- default:
- iscsi_cls_conn_printk(KERN_ERR, conn,
- "invalid stop flag %d\n", flag);
- goto unlock;
- }
-
- conn->transport->stop_conn(conn, flag);
-unlock:
- mutex_unlock(&conn_mutex);
-}
-
-static void stop_conn_work_fn(struct work_struct *work)
-{
- struct iscsi_cls_conn *conn, *tmp;
- unsigned long flags;
- LIST_HEAD(recovery_list);
-
- spin_lock_irqsave(&connlock, flags);
- if (list_empty(&connlist_err)) {
- spin_unlock_irqrestore(&connlock, flags);
- return;
- }
- list_splice_init(&connlist_err, &recovery_list);
- spin_unlock_irqrestore(&connlock, flags);
-
- list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
- uint32_t sid = iscsi_conn_get_sid(conn);
- struct iscsi_cls_session *session;
-
- session = iscsi_session_lookup(sid);
- if (session) {
- if (system_state != SYSTEM_RUNNING) {
- session->recovery_tmo = 0;
- iscsi_if_stop_conn(conn, STOP_CONN_TERM);
- } else {
- iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
- }
- }
-
- list_del_init(&conn->conn_list_err);
- }
-}
-
void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
struct nlmsghdr *nlh;
@@ -2531,12 +2583,9 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
- unsigned long flags;
- spin_lock_irqsave(&connlock, flags);
- list_add(&conn->conn_list_err, &connlist_err);
- spin_unlock_irqrestore(&connlock, flags);
- queue_work(system_unbound_wq, &stop_conn_work);
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
+ queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2866,26 +2915,17 @@ static int
iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
{
struct iscsi_cls_conn *conn;
- unsigned long flags;
conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
if (!conn)
return -EINVAL;
- spin_lock_irqsave(&connlock, flags);
- if (!list_empty(&conn->conn_list_err)) {
- spin_unlock_irqrestore(&connlock, flags);
- return -EAGAIN;
- }
- spin_unlock_irqrestore(&connlock, flags);
-
+ ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n");
+ flush_work(&conn->cleanup_work);
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
- mutex_lock(&conn_mutex);
if (transport->destroy_conn)
transport->destroy_conn(conn);
- mutex_unlock(&conn_mutex);
-
return 0;
}
@@ -2975,15 +3015,31 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
ep = iscsi_lookup_endpoint(ep_handle);
if (!ep)
return -EINVAL;
+
conn = ep->conn;
- if (conn) {
- mutex_lock(&conn->ep_mutex);
- conn->ep = NULL;
+ if (!conn) {
+ /*
+ * conn was not even bound yet, so we can't get iscsi conn
+ * failures yet.
+ */
+ transport->ep_disconnect(ep);
+ goto put_ep;
+ }
+
+ mutex_lock(&conn->ep_mutex);
+ /* Check if this was a conn error and the kernel took ownership */
+ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
mutex_unlock(&conn->ep_mutex);
- conn->state = ISCSI_CONN_FAILED;
+
+ flush_work(&conn->cleanup_work);
+ goto put_ep;
}
- transport->ep_disconnect(ep);
+ iscsi_ep_disconnect(conn, false);
+ mutex_unlock(&conn->ep_mutex);
+put_ep:
+ iscsi_put_endpoint(ep);
return 0;
}
@@ -3009,6 +3065,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->r.retcode = transport->ep_poll(ep,
ev->u.ep_poll.timeout_ms);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
rc = iscsi_if_ep_disconnect(transport,
@@ -3639,18 +3696,129 @@ exit_host_stats:
return err;
}
+static int iscsi_if_transport_conn(struct iscsi_transport *transport,
+ struct nlmsghdr *nlh)
+{
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn = NULL;
+ struct iscsi_endpoint *ep;
+ uint32_t pdu_len;
+ int err = 0;
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_CONN:
+ return iscsi_if_create_conn(transport, ev);
+ case ISCSI_UEVENT_DESTROY_CONN:
+ return iscsi_if_destroy_conn(transport, ev);
+ case ISCSI_UEVENT_STOP_CONN:
+ return iscsi_if_stop_conn(transport, ev);
+ }
+
+ /*
+ * The following cmds need to be run under the ep_mutex so in kernel
+ * conn cleanup (ep_disconnect + unbind and conn) is not done while
+ * these are running. They also must not run if we have just run a conn
+ * cleanup because they would set the state in a way that might allow
+ * IO or send IO themselves.
+ */
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_START_CONN:
+ conn = iscsi_conn_lookup(ev->u.start_conn.sid,
+ ev->u.start_conn.cid);
+ break;
+ case ISCSI_UEVENT_BIND_CONN:
+ conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
+ break;
+ }
+
+ if (!conn)
+ return -EINVAL;
+
+ mutex_lock(&conn->ep_mutex);
+ if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ mutex_unlock(&conn->ep_mutex);
+ ev->r.retcode = -ENOTCONN;
+ return 0;
+ }
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_BIND_CONN:
+ if (conn->ep) {
+ /*
+ * For offload boot support where iscsid is restarted
+ * during the pivot root stage, the ep will be intact
+ * here when the new iscsid instance starts up and
+ * reconnects.
+ */
+ iscsi_ep_disconnect(conn, true);
+ }
+
+ session = iscsi_session_lookup(ev->u.b_conn.sid);
+ if (!session) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_BOUND;
+
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+ conn->ep = ep;
+ iscsi_put_endpoint(ep);
+ } else {
+ err = -ENOTCONN;
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn binding\n");
+ }
+ break;
+ case ISCSI_UEVENT_START_CONN:
+ ev->r.retcode = transport->start_conn(conn);
+ if (!ev->r.retcode)
+ conn->state = ISCSI_CONN_UP;
+ break;
+ case ISCSI_UEVENT_SEND_PDU:
+ pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
+
+ if ((ev->u.send_pdu.hdr_size > pdu_len) ||
+ (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
+ err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->send_pdu(conn,
+ (struct iscsi_hdr *)((char *)ev + sizeof(*ev)),
+ (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
+ ev->u.send_pdu.data_size);
+ break;
+ default:
+ err = -ENOSYS;
+ }
+
+ mutex_unlock(&conn->ep_mutex);
+ return err;
+}
static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
u32 portid;
- u32 pdu_len;
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
- struct iscsi_cls_conn *conn;
struct iscsi_endpoint *ep = NULL;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -3691,6 +3859,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
+ iscsi_put_endpoint(ep);
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -3715,7 +3884,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
list_del_init(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- queue_work(iscsi_destroy_workq, &session->destroy_work);
+ queue_work(system_unbound_wq, &session->destroy_work);
}
break;
case ISCSI_UEVENT_UNBIND_SESSION:
@@ -3726,89 +3895,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
else
err = -EINVAL;
break;
- case ISCSI_UEVENT_CREATE_CONN:
- err = iscsi_if_create_conn(transport, ev);
- break;
- case ISCSI_UEVENT_DESTROY_CONN:
- err = iscsi_if_destroy_conn(transport, ev);
- break;
- case ISCSI_UEVENT_BIND_CONN:
- session = iscsi_session_lookup(ev->u.b_conn.sid);
- conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
-
- if (conn && conn->ep)
- iscsi_if_ep_disconnect(transport, conn->ep->id);
-
- if (!session || !conn) {
- err = -EINVAL;
- break;
- }
-
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
- mutex_unlock(&conn_mutex);
-
- if (ev->r.retcode || !transport->ep_connect)
- break;
-
- ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
- if (ep) {
- ep->conn = conn;
-
- mutex_lock(&conn->ep_mutex);
- conn->ep = ep;
- mutex_unlock(&conn->ep_mutex);
- } else
- iscsi_cls_conn_printk(KERN_ERR, conn,
- "Could not set ep conn "
- "binding\n");
- break;
case ISCSI_UEVENT_SET_PARAM:
err = iscsi_set_param(transport, ev);
break;
- case ISCSI_UEVENT_START_CONN:
- conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
- if (conn) {
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->start_conn(conn);
- if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
- mutex_unlock(&conn_mutex);
- }
- else
- err = -EINVAL;
- break;
+ case ISCSI_UEVENT_CREATE_CONN:
+ case ISCSI_UEVENT_DESTROY_CONN:
case ISCSI_UEVENT_STOP_CONN:
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (conn)
- iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
- else
- err = -EINVAL;
- break;
+ case ISCSI_UEVENT_START_CONN:
+ case ISCSI_UEVENT_BIND_CONN:
case ISCSI_UEVENT_SEND_PDU:
- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
-
- if ((ev->u.send_pdu.hdr_size > pdu_len) ||
- (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
- err = -EINVAL;
- break;
- }
-
- conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
- if (conn) {
- mutex_lock(&conn_mutex);
- ev->r.retcode = transport->send_pdu(conn,
- (struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
- (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
- ev->u.send_pdu.data_size);
- mutex_unlock(&conn_mutex);
- }
- else
- err = -EINVAL;
+ err = iscsi_if_transport_conn(transport, nlh);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -4656,6 +4752,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
int err;
BUG_ON(!tt);
+ WARN_ON(tt->ep_disconnect && !tt->unbind_conn);
priv = iscsi_if_transport_lookup(tt);
if (priv)
@@ -4810,10 +4907,10 @@ static __init int iscsi_transport_init(void)
goto release_nls;
}
- iscsi_destroy_workq = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, "iscsi_destroy");
- if (!iscsi_destroy_workq) {
+ iscsi_conn_cleanup_workq = alloc_workqueue("%s",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ "iscsi_conn_cleanup");
+ if (!iscsi_conn_cleanup_workq) {
err = -ENOMEM;
goto destroy_wq;
}
@@ -4843,7 +4940,7 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
- destroy_workqueue(iscsi_destroy_workq);
+ destroy_workqueue(iscsi_conn_cleanup_workq);
destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
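The endpoint lifetime change in scsi_transport_iscsi.c is the reason for the iscsi_put_endpoint() calls added to the qedi and qla4xxx hunks above: iscsi_lookup_endpoint() no longer drops the device reference itself, so every lookup must be paired with a put once the caller is done with the endpoint. Caller-side sketch:

struct iscsi_endpoint *ep;

ep = iscsi_lookup_endpoint(ep_handle);
if (!ep)
	return -EINVAL;

/* ... use ep ... */

iscsi_put_endpoint(ep);	/* drop the reference taken by the lookup */
return 0;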
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index c9abed8429c9..4a96fb05731d 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1229,16 +1229,15 @@ int sas_read_port_mode_page(struct scsi_device *sdev)
char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
struct scsi_mode_data mode_data;
- int res, error;
+ int error;
if (!buffer)
return -ENOMEM;
- res = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
- &mode_data, NULL);
+ error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
+ &mode_data, NULL);
- error = -EINVAL;
- if (!scsi_status_is_good(res))
+ if (error)
goto out;
msdata = buffer + mode_data.header_length +
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index c37dd15d16d2..5af7a10e9514 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -127,7 +127,7 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
RQF_PM, NULL);
- if (driver_byte(result) != DRIVER_SENSE ||
+ if (result < 0 || !scsi_sense_valid(sshdr) ||
sshdr->sense_key != UNIT_ATTENTION)
break;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 746a7def2825..6d2d63629a90 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1672,7 +1672,7 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
&sshdr);
/* failed to execute TUR, assume media not present */
- if (host_byte(retval)) {
+ if (retval < 0 || host_byte(retval)) {
set_media_not_present(sdkp);
goto out;
}
@@ -1733,16 +1733,20 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
if (res) {
sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
- if (driver_byte(res) == DRIVER_SENSE)
+ if (res < 0)
+ return res;
+
+ if (scsi_status_is_check_condition(res) &&
+ scsi_sense_valid(sshdr)) {
sd_print_sense_hdr(sdkp, sshdr);
- /* we need to evaluate the error return */
- if (scsi_sense_valid(sshdr) &&
- (sshdr->asc == 0x3a || /* medium not present */
- sshdr->asc == 0x20 || /* invalid command */
- (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
+ /* we need to evaluate the error return */
+ if (sshdr->asc == 0x3a || /* medium not present */
+ sshdr->asc == 0x20 || /* invalid command */
+ (sshdr->asc == 0x74 && sshdr->ascq == 0x71)) /* drive is password locked */
/* this is no error here */
return 0;
+ }
switch (host_byte(res)) {
/* ignore errors due to racing a disconnection */
@@ -1839,7 +1843,7 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
&sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
- if (driver_byte(result) == DRIVER_SENSE &&
+ if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
scsi_print_sense_hdr(sdev, NULL, &sshdr);
@@ -2083,7 +2087,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
}
sdkp->medium_access_timed_out = 0;
- if (driver_byte(result) != DRIVER_SENSE &&
+ if (!scsi_status_is_check_condition(result) &&
(!sense_valid || sense_deferred))
goto out;
@@ -2186,12 +2190,12 @@ sd_spinup_disk(struct scsi_disk *sdkp)
if (the_result)
sense_valid = scsi_sense_valid(&sshdr);
retries++;
- } while (retries < 3 &&
+ } while (retries < 3 &&
(!scsi_status_is_good(the_result) ||
- ((driver_byte(the_result) == DRIVER_SENSE) &&
+ (scsi_status_is_check_condition(the_result) &&
sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
- if (driver_byte(the_result) != DRIVER_SENSE) {
+ if (!scsi_status_is_check_condition(the_result)) {
/* no sense, TUR either succeeded or failed
* with a status error */
if(!spintime && !scsi_status_is_good(the_result)) {
@@ -2319,7 +2323,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
struct scsi_sense_hdr *sshdr, int sense_valid,
int the_result)
{
- if (driver_byte(the_result) == DRIVER_SENSE)
+ if (sense_valid)
sd_print_sense_hdr(sdkp, sshdr);
else
sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
@@ -2376,7 +2380,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result) {
+ if (the_result > 0) {
sense_valid = scsi_sense_valid(&sshdr);
if (sense_valid &&
sshdr.sense_key == ILLEGAL_REQUEST &&
@@ -2461,7 +2465,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
- if (the_result) {
+ if (the_result > 0) {
sense_valid = scsi_sense_valid(&sshdr);
if (sense_valid &&
sshdr.sense_key == UNIT_ATTENTION &&
@@ -2684,18 +2688,18 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
* 5: Illegal Request, Sense Code 24: Invalid field in
* CDB.
*/
- if (!scsi_status_is_good(res))
+ if (res < 0)
res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
/*
* Third attempt: ask 255 bytes, as we did earlier.
*/
- if (!scsi_status_is_good(res))
+ if (res < 0)
res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
&data, NULL);
}
- if (!scsi_status_is_good(res)) {
+ if (res < 0) {
sd_first_printk(KERN_WARNING, sdkp,
"Test WP failed, assume Write Enabled\n");
} else {
@@ -2756,7 +2760,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
&data, &sshdr);
- if (!scsi_status_is_good(res))
+ if (res < 0)
goto bad_sense;
if (!data.header_length) {
@@ -2788,7 +2792,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
&data, &sshdr);
- if (scsi_status_is_good(res)) {
+ if (!res) {
int offset = data.header_length + data.block_descriptor_length;
while (offset < len) {
@@ -2906,7 +2910,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr);
- if (!scsi_status_is_good(res) || !data.header_length ||
+ if (res < 0 || !data.header_length ||
data.length < 6) {
sd_first_printk(KERN_WARNING, sdkp,
"getting Control mode page failed, assume no ATO\n");
@@ -3605,12 +3609,12 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
- if (driver_byte(res) == DRIVER_SENSE)
+ if (res > 0 && scsi_sense_valid(&sshdr)) {
sd_print_sense_hdr(sdkp, &sshdr);
- if (scsi_sense_valid(&sshdr) &&
/* 0x3a is medium not present */
- sshdr.asc == 0x3a)
- res = 0;
+ if (sshdr.asc == 0x3a)
+ res = 0;
+ }
}
/* SCSI error codes must not go to the generic layer */
@@ -3820,15 +3824,14 @@ void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
{
const char *hb_string = scsi_hostbyte_string(result);
- const char *db_string = scsi_driverbyte_string(result);
- if (hb_string || db_string)
+ if (hb_string)
sd_printk(KERN_INFO, sdkp,
"%s: Result: hostbyte=%s driverbyte=%s\n", msg,
hb_string ? hb_string : "invalid",
- db_string ? db_string : "invalid");
+ "DRIVER_OK");
else
sd_printk(KERN_INFO, sdkp,
- "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
- msg, host_byte(result), driver_byte(result));
+ "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
+ msg, host_byte(result), "DRIVER_OK");
}
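Across sd.c the driver_byte(res) == DRIVER_SENSE tests become scsi_status_is_check_condition(res) combined with scsi_sense_valid(), and negative values are treated as submission errors rather than decoded further. A small sketch of evaluating a result under that scheme, reusing the asc values quoted in the sd_sync_cache() hunk above and assuming the scsi_status_is_check_condition() helper the hunks rely on; the function itself is illustrative:

	#include <linux/errno.h>
	#include <scsi/scsi_common.h>

	/* Illustrative: evaluate a scsi_execute_req()-style return value. */
	static int example_eval_result(int res, struct scsi_sense_hdr *sshdr)
	{
		if (res < 0)		/* command was never issued */
			return res;

		if (scsi_status_is_check_condition(res) &&
		    scsi_sense_valid(sshdr) &&
		    (sshdr->asc == 0x3a ||	/* medium not present */
		     sshdr->asc == 0x20))	/* invalid command */
			return 0;	/* benign, as in sd_sync_cache() */

		return -EIO;		/* anything else is a real error */
	}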
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e45d8d94574c..186b5ff52c3a 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -116,8 +116,7 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
sd_printk(KERN_ERR, sdkp,
"REPORT ZONES start lba %llu failed\n", lba);
sd_print_result(sdkp, "REPORT ZONES", result);
- if (driver_byte(result) == DRIVER_SENSE &&
- scsi_sense_valid(&sshdr))
+ if (result > 0 && scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return -EIO;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index def7ec3bbaf9..4e66994be190 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -498,9 +498,11 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
old_hdr->host_status = hp->host_status;
old_hdr->driver_status = hp->driver_status;
if ((CHECK_CONDITION & hp->masked_status) ||
- (DRIVER_SENSE & hp->driver_status))
+ (srp->sense_b[0] & 0x70) == 0x70) {
+ old_hdr->driver_status = DRIVER_SENSE;
memcpy(old_hdr->sense_buffer, srp->sense_b,
sizeof (old_hdr->sense_buffer));
+ }
switch (hp->host_status) {
/* This setup of 'result' is for backward compatibility and is best
ignored by the user who should use target, host + driver status */
@@ -574,7 +576,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
hp->sb_len_wr = 0;
if ((hp->mx_sb_len > 0) && hp->sbp) {
if ((CHECK_CONDITION & hp->masked_status) ||
- (DRIVER_SENSE & hp->driver_status)) {
+ (srp->sense_b[0] & 0x70) == 0x70) {
int sb_len = SCSI_SENSE_BUFFERSIZE;
sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
@@ -583,6 +585,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
err = -EFAULT;
goto err_out;
}
+ hp->driver_status = DRIVER_SENSE;
hp->sb_len_wr = len;
}
}
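The sg changes stop testing DRIVER_SENSE in hp->driver_status and instead look at the sense buffer's response code byte, setting DRIVER_SENSE afterwards only to preserve the user-space ABI. The mask used in the hunks matches response codes 0x70-0x7f, of which 0x70-0x73 are the defined fixed/descriptor, current/deferred formats. A minimal sketch of that check; the helper name is illustrative:

	#include <linux/types.h>

	/* Illustrative: does the buffer start with a valid response code? */
	static bool example_sense_present(const u8 *sense_b)
	{
		return (sense_b[0] & 0x70) == 0x70;
	}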
@@ -1373,7 +1376,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
srp->header.status = 0xff & result;
srp->header.masked_status = status_byte(result);
- srp->header.msg_status = msg_byte(result);
+ srp->header.msg_status = COMMAND_COMPLETE;
srp->header.host_status = host_byte(result);
srp->header.driver_status = driver_byte(result);
if ((sdp->sgdebug > 0) &&
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 5db16509b6e1..dcc0b9618a64 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -3087,8 +3087,7 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
}
if (device_offline && sense_data_length == 0)
- scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
- 0x3e, 0x1);
+ scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
scmd->result = scsi_status;
set_host_byte(scmd, host_byte);
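scsi_build_sense(scmd, ...) used in the smartpqi hunk replaces the open-coded scsi_build_sense_buffer() call; per the other conversions in this series it fills the command's sense buffer and sets the CHECK CONDITION status, so drivers no longer stamp DRIVER_SENSE into scmd->result by hand. A hedged sketch of a completion path using it, with the same sense values as above:

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_proto.h>

	/* Illustrative: fail a command with HARDWARE ERROR, asc 0x3e/ascq 0x1. */
	static void example_fail_cmd(struct scsi_cmnd *scmd)
	{
		scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
		scmd->scsi_done(scmd);
	}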
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 4cd86115cfb2..703f229862fc 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -114,10 +114,7 @@ snic_queue_exch_ver_req(struct snic *snic)
rqi = snic_req_init(snic, 0);
if (!rqi) {
- SNIC_HOST_ERR(snic->shost,
- "Queuing Exch Ver Req failed, err = %d\n",
- ret);
-
+ SNIC_HOST_ERR(snic->shost, "Init Exch Ver Req failed\n");
ret = -ENOMEM;
goto error;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 7815ed642d43..94c254e9012e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -340,7 +340,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
* care is taken to avoid unnecessary additional work such as
* memcpy's that could be avoided.
*/
- if (driver_byte(result) != 0 && /* An error occurred */
+ if (scsi_status_is_check_condition(result) &&
(SCpnt->sense_buffer[0] & 0x7f) == 0x70) { /* Sense current */
switch (SCpnt->sense_buffer[2]) {
case MEDIUM_ERROR:
@@ -913,7 +913,7 @@ static void get_capabilities(struct scsi_cd *cd)
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ if (rc < 0 || data.length > ms_len ||
data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 15c305283b6c..79d9aa2df528 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -201,7 +201,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
- if (driver_byte(result) != 0) {
+ if (result < 0) {
+ err = result;
+ goto out;
+ }
+ if (scsi_status_is_check_condition(result)) {
switch (sshdr->sense_key) {
case UNIT_ATTENTION:
SDev->changed = 1;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3b1afe1d5b27..66f48bd6da76 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -390,8 +390,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (!debugging) { /* Abnormal conditions for tape */
if (!cmdstatp->have_sense)
st_printk(KERN_WARNING, STp,
- "Error %x (driver bt 0x%x, host bt 0x%x).\n",
- result, driver_byte(result), host_byte(result));
+ "Error %x (driver bt 0, host bt 0x%x).\n",
+ result, host_byte(result));
else if (cmdstatp->have_sense &&
scode != NO_SENSE &&
scode != RECOVERED_ERROR &&
@@ -551,7 +551,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
data_direction == DMA_TO_DEVICE ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(req))
- return DRIVER_ERROR << 24;
+ return PTR_ERR(req);
rq = scsi_req(req);
req->rq_flags |= RQF_QUIET;
@@ -562,7 +562,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
GFP_KERNEL);
if (err) {
blk_put_request(req);
- return DRIVER_ERROR << 24;
+ return err;
}
}
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 12471208c7a8..491b435273a6 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -398,11 +398,8 @@ static struct status_msg *stex_get_status(struct st_hba *hba)
static void stex_invalid_field(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
- cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
/* "Invalid field in cdb" */
- scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
- 0x0);
+ scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
done(cmd);
}
@@ -740,7 +737,7 @@ static void stex_scsi_done(struct st_ccb *ccb)
result |= DID_OK << 16;
break;
case SAM_STAT_CHECK_CONDITION:
- result |= DRIVER_SENSE << 24;
+ result |= DID_OK << 16;
break;
case SAM_STAT_BUSY:
result |= DID_BUS_BUSY << 16;
@@ -751,7 +748,7 @@ static void stex_scsi_done(struct st_ccb *ccb)
}
}
else if (ccb->srb_status & SRB_SEE_SENSE)
- result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
+ result = SAM_STAT_CHECK_CONDITION;
else switch (ccb->srb_status) {
case SRB_STATUS_SELECTION_TIMEOUT:
result = DID_NO_CONNECT << 16;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 403753929320..328bb961c281 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1031,17 +1031,40 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
struct storvsc_scan_work *wrk;
void (*process_err_fn)(struct work_struct *work);
struct hv_host_device *host_dev = shost_priv(host);
- bool do_work = false;
- switch (SRB_STATUS(vm_srb->srb_status)) {
- case SRB_STATUS_ERROR:
+ /*
+ * In some situations, Hyper-V sets multiple bits in the
+ * srb_status, such as ABORTED and ERROR. So process them
+ * individually, with the most specific bits first.
+ */
+
+ if (vm_srb->srb_status & SRB_STATUS_INVALID_LUN) {
+ set_host_byte(scmnd, DID_NO_CONNECT);
+ process_err_fn = storvsc_remove_lun;
+ goto do_work;
+ }
+
+ if (vm_srb->srb_status & SRB_STATUS_ABORTED) {
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ /* Capacity data has changed */
+ (asc == 0x2a) && (ascq == 0x9)) {
+ process_err_fn = storvsc_device_scan;
+ /*
+ * Retry the I/O that triggered this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
+ }
+
+ if (vm_srb->srb_status & SRB_STATUS_ERROR) {
/*
* Let upper layer deal with error when
* sense message is present.
*/
-
if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
- break;
+ return;
+
/*
* If there is an error; offline the device since all
* error recovery strategies would have already been
@@ -1054,37 +1077,19 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
set_host_byte(scmnd, DID_PASSTHROUGH);
break;
/*
- * On Some Windows hosts TEST_UNIT_READY command can return
- * SRB_STATUS_ERROR, let the upper level code deal with it
- * based on the sense information.
+ * On some Hyper-V hosts TEST_UNIT_READY command can
+ * return SRB_STATUS_ERROR. Let the upper level code
+ * deal with it based on the sense information.
*/
case TEST_UNIT_READY:
break;
default:
set_host_byte(scmnd, DID_ERROR);
}
- break;
- case SRB_STATUS_INVALID_LUN:
- set_host_byte(scmnd, DID_NO_CONNECT);
- do_work = true;
- process_err_fn = storvsc_remove_lun;
- break;
- case SRB_STATUS_ABORTED:
- if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
- (asc == 0x2a) && (ascq == 0x9)) {
- do_work = true;
- process_err_fn = storvsc_device_scan;
- /*
- * Retry the I/O that triggered this.
- */
- set_host_byte(scmnd, DID_REQUEUE);
- }
- break;
}
+ return;
- if (!do_work)
- return;
-
+do_work:
/*
* We need to schedule work to process this error; schedule it.
*/
@@ -1112,6 +1117,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
struct Scsi_Host *host;
u32 payload_sz = cmd_request->payload_sz;
void *payload = cmd_request->payload;
+ bool sense_ok;
host = stor_dev->host;
@@ -1121,11 +1127,10 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
scmnd->result = vm_srb->scsi_status;
if (scmnd->result) {
- if (scsi_normalize_sense(scmnd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, &sense_hdr) &&
- !(sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x03A) &&
- do_logging(STORVSC_LOGGING_ERROR))
+ sense_ok = scsi_normalize_sense(scmnd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+
+ if (sense_ok && do_logging(STORVSC_LOGGING_WARN))
scsi_print_sense_hdr(scmnd->device, "storvsc",
&sense_hdr);
}
@@ -1182,53 +1187,41 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
}
-
/* Copy over the status...etc */
stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
- /* Validate sense_info_length (from Hyper-V) */
- if (vstor_packet->vm_srb.sense_info_length > sense_buffer_size)
- vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
-
- stor_pkt->vm_srb.sense_info_length =
- vstor_packet->vm_srb.sense_info_length;
+ /*
+ * Copy over the sense_info_length, but limit to the known max
+ * size if Hyper-V returns a bad value.
+ */
+ stor_pkt->vm_srb.sense_info_length = min_t(u8, sense_buffer_size,
+ vstor_packet->vm_srb.sense_info_length);
if (vstor_packet->vm_srb.scsi_status != 0 ||
vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS)
- storvsc_log(device, STORVSC_LOGGING_WARN,
- "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
+ storvsc_log(device, STORVSC_LOGGING_ERROR,
+ "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ request->cmd->request->tag,
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
- vstor_packet->vm_srb.srb_status);
-
- if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
- /* CHECK_CONDITION */
- if (vstor_packet->vm_srb.srb_status &
- SRB_STATUS_AUTOSENSE_VALID) {
- /* autosense data available */
-
- storvsc_log(device, STORVSC_LOGGING_WARN,
- "stor pkt %p autosense data valid - len %d\n",
- request, vstor_packet->vm_srb.sense_info_length);
-
- memcpy(request->cmd->sense_buffer,
- vstor_packet->vm_srb.sense_data,
- vstor_packet->vm_srb.sense_info_length);
+ vstor_packet->vm_srb.srb_status,
+ vstor_packet->status);
- }
- }
+ if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
+ (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
+ memcpy(request->cmd->sense_buffer,
+ vstor_packet->vm_srb.sense_data,
+ stor_pkt->vm_srb.sense_info_length);
stor_pkt->vm_srb.data_transfer_length =
- vstor_packet->vm_srb.data_transfer_length;
+ vstor_packet->vm_srb.data_transfer_length;
storvsc_command_completion(request, stor_device);
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
wake_up(&stor_device->waiting_to_drain);
-
-
}
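Two storvsc details above are worth noting: srb_status is now decoded bit by bit (Hyper-V can set ABORTED and ERROR together), with a do_work label replacing the do_work flag, and the host-supplied sense_info_length is clamped with min_t() before the autosense data is copied. A minimal sketch of the clamping idea:

	#include <linux/minmax.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Illustrative: never trust a length reported by the other end. */
	static void example_copy_sense(u8 *dst, const u8 *src,
				       u8 reported_len, u8 buf_size)
	{
		memcpy(dst, src, min_t(u8, buf_size, reported_len));
	}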
static void storvsc_on_receive(struct storvsc_device *stor_device,
@@ -1717,7 +1710,7 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
* this. So, don't send it.
*/
case SET_WINDOW:
- scmnd->result = DID_ERROR << 16;
+ set_host_byte(scmnd, DID_ERROR);
allowed = false;
break;
default:
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d9a045f9858c..16b65fc4405c 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -170,9 +170,8 @@ static int sym_xerr_cam_status(int cam_status, int x_status)
void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
{
struct scsi_cmnd *cmd = cp->cmd;
- u_int cam_status, scsi_status, drv_status;
+ u_int cam_status, scsi_status;
- drv_status = 0;
cam_status = DID_OK;
scsi_status = cp->ssss_status;
@@ -186,7 +185,6 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cp->xerr_status == 0) {
cam_status = sym_xerr_cam_status(DID_OK,
cp->sv_xerr_status);
- drv_status = DRIVER_SENSE;
/*
* Bounce back the sense data to user.
*/
@@ -235,7 +233,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
}
scsi_set_resid(cmd, resid);
- cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status;
+ cmd->result = (cam_status << 16) | scsi_status;
}
static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 07cf415367b4..2d137953e7b4 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -115,6 +115,7 @@ config SCSI_UFS_MEDIATEK
tristate "Mediatek specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
select PHY_MTK_UFS
+ select RESET_TI_SYSCON
help
This selects the Mediatek specific additions to UFSHCD platform driver.
UFS host on Mediatek needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 13d92043e13b..908ff39c4856 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -323,6 +323,8 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver cdns_ufs_pltfrm_driver = {
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index 67a6a61154b7..ec4589afbc13 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -148,6 +148,8 @@ static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
.runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
.runtime_resume = tc_dwc_g210_pci_runtime_resume,
.runtime_idle = tc_dwc_g210_pci_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index ced9ef4d7c78..4e1ff209b933 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -13,7 +13,7 @@ void __init ufs_debugfs_init(void)
ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
}
-void __exit ufs_debugfs_exit(void)
+void ufs_debugfs_exit(void)
{
debugfs_remove_recursive(ufs_debugfs_root);
}
@@ -60,14 +60,14 @@ __acquires(&hba->host_sem)
up(&hba->host_sem);
return -EBUSY;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
return 0;
}
static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
__releases(&hba->host_sem)
{
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
up(&hba->host_sem);
}
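Several UFS hunks from here on replace pm_runtime_get_sync(hba->dev)/pm_runtime_put_sync(hba->dev) with ufshcd_rpm_get_sync(hba)/ufshcd_rpm_put_sync(hba). Their definition is not part of the quoted hunks; presumably they take the runtime-PM reference on the UFS device well-known LUN (hba->sdev_ufs_device) rather than the controller, which would match the ufshcd_auto_hibern8_update() hunk below that checks pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev) and the supplier/consumer links added in ufshcd_setup_links(). A sketch of that assumed shape, explicitly not quoted from the patch:

	#include <linux/pm_runtime.h>
	#include <scsi/scsi_device.h>
	#include "ufshcd.h"	/* struct ufs_hba */

	/* Assumption: the rpm wrappers target the device W-LUN, not hba->dev. */
	static inline int example_rpm_get_sync(struct ufs_hba *hba)
	{
		return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
	}

	static inline int example_rpm_put_sync(struct ufs_hba *hba)
	{
		return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
	}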
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
index 3ca29d30460a..97548a3f90eb 100644
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -9,7 +9,7 @@ struct ufs_hba;
#ifdef CONFIG_DEBUG_FS
void __init ufs_debugfs_init(void);
-void __exit ufs_debugfs_exit(void);
+void ufs_debugfs_exit(void);
void ufs_debugfs_hba_init(struct ufs_hba *hba);
void ufs_debugfs_hba_exit(struct ufs_hba *hba);
void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 70647eacf195..cf46d6f86e0e 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -107,6 +107,7 @@ enum {
#define CNTR_DIV_VAL 40
+static struct exynos_ufs_drv_data exynos_ufs_drvs;
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
@@ -1048,7 +1049,7 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
exynos_ufs_ungate_clks(ufs);
if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
- const unsigned int granularity_tbl[] = {
+ static const unsigned int granularity_tbl[] = {
1, 4, 8, 16, 32, 100
};
int h8_time = attr->pa_hibern8time *
@@ -1231,8 +1232,32 @@ static int exynos_ufs_remove(struct platform_device *pdev)
return 0;
}
-struct exynos_ufs_drv_data exynos_ufs_drvs = {
+static struct exynos_ufs_uic_attr exynos7_uic_attr = {
+ .tx_trailingclks = 0x10,
+ .tx_dif_p_nsec = 3000000, /* unit: ns */
+ .tx_dif_n_nsec = 1000000, /* unit: ns */
+ .tx_high_z_cnt_nsec = 20000, /* unit: ns */
+ .tx_base_unit_nsec = 100000, /* unit: ns */
+ .tx_gran_unit_nsec = 4000, /* unit: ns */
+ .tx_sleep_cnt = 1000, /* unit: ns */
+ .tx_min_activatetime = 0xa,
+ .rx_filler_enable = 0x2,
+ .rx_dif_p_nsec = 1000000, /* unit: ns */
+ .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
+ .rx_base_unit_nsec = 100000, /* unit: ns */
+ .rx_gran_unit_nsec = 4000, /* unit: ns */
+ .rx_sleep_cnt = 1280, /* unit: ns */
+ .rx_stall_cnt = 320, /* unit: ns */
+ .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
+ .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
+ .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
+ .pa_dbg_option_suite = 0x30103,
+};
+static struct exynos_ufs_drv_data exynos_ufs_drvs = {
.compatible = "samsung,exynos7-ufs",
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
@@ -1267,6 +1292,8 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver exynos_ufs_pltform = {
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index 06ee565f7eb0..67505fe32ebf 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -245,30 +245,4 @@ static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), FALSE);
}
-struct exynos_ufs_drv_data exynos_ufs_drvs;
-
-struct exynos_ufs_uic_attr exynos7_uic_attr = {
- .tx_trailingclks = 0x10,
- .tx_dif_p_nsec = 3000000, /* unit: ns */
- .tx_dif_n_nsec = 1000000, /* unit: ns */
- .tx_high_z_cnt_nsec = 20000, /* unit: ns */
- .tx_base_unit_nsec = 100000, /* unit: ns */
- .tx_gran_unit_nsec = 4000, /* unit: ns */
- .tx_sleep_cnt = 1000, /* unit: ns */
- .tx_min_activatetime = 0xa,
- .rx_filler_enable = 0x2,
- .rx_dif_p_nsec = 1000000, /* unit: ns */
- .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
- .rx_base_unit_nsec = 100000, /* unit: ns */
- .rx_gran_unit_nsec = 4000, /* unit: ns */
- .rx_sleep_cnt = 1280, /* unit: ns */
- .rx_stall_cnt = 320, /* unit: ns */
- .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
- .pa_dbg_option_suite = 0x30103,
-};
#endif /* _UFS_EXYNOS_H_ */
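The exynos change is a plain header cleanup: exynos7_uic_attr and exynos_ufs_drvs were defined, not just declared, in ufs-exynos.h, giving every includer its own copy; they now live in ufs-exynos.c as static objects, with a forward declaration added where needed. A tiny sketch of the intended split; the file and field names are illustrative:

	/* example.h — declarations and inline helpers only */
	struct example_uic_attr {
		unsigned int tx_trailingclks;
	};

	/* example.c — the single, file-local definition */
	static struct example_uic_attr example7_uic_attr = {
		.tx_trailingclks = 0x10,
	};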
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index d0626773eb38..5b147a48161b 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -400,7 +400,7 @@ static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
- if (ufshcd_is_runtime_pm(pm_op))
+ if (pm_op == UFS_RUNTIME_PM)
return 0;
if (host->in_suspend) {
@@ -577,6 +577,8 @@ static const struct dev_pm_ops ufs_hisi_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_hisi_pltform = {
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0a84ec9e7cea..d2c251628a05 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -822,12 +822,10 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
- /* configure auto-hibern8 timer to 10ms */
- if (ufshcd_is_auto_hibern8_supported(hba)) {
- ufshcd_auto_hibern8_update(hba,
- FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
- }
+ /* will be configured during probe hba */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+ FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
ufs_mtk_setup_clk_gating(hba);
@@ -858,6 +856,9 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
+ /* disable hba before device reset */
+ ufshcd_hba_stop(hba);
+
ufs_mtk_device_reset_ctrl(0, res);
/*
@@ -1084,12 +1085,42 @@ static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
+ struct device_node *reset_node;
+ struct platform_device *reset_pdev;
+ struct device_link *link;
+
+ reset_node = of_find_compatible_node(NULL, NULL,
+ "ti,syscon-reset");
+ if (!reset_node) {
+ dev_notice(dev, "find ti,syscon-reset fail\n");
+ goto skip_reset;
+ }
+ reset_pdev = of_find_device_by_node(reset_node);
+ if (!reset_pdev) {
+ dev_notice(dev, "find reset_pdev fail\n");
+ goto skip_reset;
+ }
+ link = device_link_add(dev, &reset_pdev->dev,
+ DL_FLAG_AUTOPROBE_CONSUMER);
+ if (!link) {
+ dev_notice(dev, "add reset device_link fail\n");
+ goto skip_reset;
+ }
+ /* supplier is not probed */
+ if (link->status == DL_STATE_DORMANT) {
+ err = -EPROBE_DEFER;
+ goto out;
+ }
+skip_reset:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
+
+out:
if (err)
dev_info(dev, "probe failed %d\n", err);
+ of_node_put(reset_node);
return err;
}
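The MediaTek probe above pairs with the new `select RESET_TI_SYSCON` in Kconfig: it looks up the "ti,syscon-reset" node, links the UFS host to that reset controller's platform device with DL_FLAG_AUTOPROBE_CONSUMER, and returns -EPROBE_DEFER while the supplier is still DL_STATE_DORMANT. A reduced sketch of that lookup-and-defer pattern using the standard OF and device-link APIs; the helper is illustrative:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/of.h>
	#include <linux/of_platform.h>
	#include <linux/platform_device.h>

	/* Illustrative: defer probing until a DT-described supplier has probed. */
	static int example_link_supplier(struct device *dev, const char *compat)
	{
		struct device_node *np = of_find_compatible_node(NULL, NULL, compat);
		struct platform_device *supplier;
		struct device_link *link;
		int ret = 0;

		if (!np)
			return 0;	/* supplier is optional, keep probing */

		supplier = of_find_device_by_node(np);
		if (supplier) {
			link = device_link_add(dev, &supplier->dev,
					       DL_FLAG_AUTOPROBE_CONSUMER);
			if (link && link->status == DL_STATE_DORMANT)
				ret = -EPROBE_DEFER;	/* not probed yet */
		}

		of_node_put(np);
		return ret;
	}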
@@ -1114,6 +1145,8 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_mtk_pltform = {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2a3dd21da6a6..9b1d18d7c9bb 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1551,6 +1551,8 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = {
.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
.runtime_resume = ufshcd_pltfrm_runtime_resume,
.runtime_idle = ufshcd_pltfrm_runtime_idle,
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_qcom_pltform = {
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 5d0e98a05ada..52bd807f7940 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -245,9 +245,9 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
res = ufshcd_wb_toggle(hba, wb_enable);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return res < 0 ? res : count;
@@ -278,6 +278,242 @@ static const struct attribute_group ufs_sysfs_default_group = {
.attrs = ufs_sysfs_ufshcd_attrs,
};
+static ssize_t monitor_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
+}
+
+static ssize_t monitor_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long value, flags;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (value == hba->monitor.enabled)
+ goto out_unlock;
+
+ if (!value) {
+ memset(&hba->monitor, 0, sizeof(hba->monitor));
+ } else {
+ hba->monitor.enabled = true;
+ hba->monitor.enabled_ts = ktime_get();
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t monitor_chunk_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
+}
+
+static ssize_t monitor_chunk_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long value, flags;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /* Only allow chunk size change when monitor is disabled */
+ if (!hba->monitor.enabled)
+ hba->monitor.chunk_size = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t read_total_sectors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
+}
+
+static ssize_t read_total_busy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.total_busy[READ]));
+}
+
+static ssize_t read_nr_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
+}
+
+static ssize_t read_req_latency_avg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
+ m->nr_req[READ]));
+}
+
+static ssize_t read_req_latency_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_max[READ]));
+}
+
+static ssize_t read_req_latency_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_min[READ]));
+}
+
+static ssize_t read_req_latency_sum_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_sum[READ]));
+}
+
+static ssize_t write_total_sectors_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
+}
+
+static ssize_t write_total_busy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.total_busy[WRITE]));
+}
+
+static ssize_t write_nr_requests_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
+}
+
+static ssize_t write_req_latency_avg_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
+ m->nr_req[WRITE]));
+}
+
+static ssize_t write_req_latency_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_max[WRITE]));
+}
+
+static ssize_t write_req_latency_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_min[WRITE]));
+}
+
+static ssize_t write_req_latency_sum_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%llu\n",
+ ktime_to_us(hba->monitor.lat_sum[WRITE]));
+}
+
+static DEVICE_ATTR_RW(monitor_enable);
+static DEVICE_ATTR_RW(monitor_chunk_size);
+static DEVICE_ATTR_RO(read_total_sectors);
+static DEVICE_ATTR_RO(read_total_busy);
+static DEVICE_ATTR_RO(read_nr_requests);
+static DEVICE_ATTR_RO(read_req_latency_avg);
+static DEVICE_ATTR_RO(read_req_latency_max);
+static DEVICE_ATTR_RO(read_req_latency_min);
+static DEVICE_ATTR_RO(read_req_latency_sum);
+static DEVICE_ATTR_RO(write_total_sectors);
+static DEVICE_ATTR_RO(write_total_busy);
+static DEVICE_ATTR_RO(write_nr_requests);
+static DEVICE_ATTR_RO(write_req_latency_avg);
+static DEVICE_ATTR_RO(write_req_latency_max);
+static DEVICE_ATTR_RO(write_req_latency_min);
+static DEVICE_ATTR_RO(write_req_latency_sum);
+
+static struct attribute *ufs_sysfs_monitor_attrs[] = {
+ &dev_attr_monitor_enable.attr,
+ &dev_attr_monitor_chunk_size.attr,
+ &dev_attr_read_total_sectors.attr,
+ &dev_attr_read_total_busy.attr,
+ &dev_attr_read_nr_requests.attr,
+ &dev_attr_read_req_latency_avg.attr,
+ &dev_attr_read_req_latency_max.attr,
+ &dev_attr_read_req_latency_min.attr,
+ &dev_attr_read_req_latency_sum.attr,
+ &dev_attr_write_total_sectors.attr,
+ &dev_attr_write_total_busy.attr,
+ &dev_attr_write_nr_requests.attr,
+ &dev_attr_write_req_latency_avg.attr,
+ &dev_attr_write_req_latency_max.attr,
+ &dev_attr_write_req_latency_min.attr,
+ &dev_attr_write_req_latency_sum.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_monitor_group = {
+ .name = "monitor",
+ .attrs = ufs_sysfs_monitor_attrs,
+};
+
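The new "monitor" attribute group follows the usual sysfs pattern: one *_show/_store pair per file, DEVICE_ATTR_RW/DEVICE_ATTR_RO to generate the dev_attr_* objects, sysfs_emit() on the read side, and a named attribute_group so everything lands under .../monitor/. A minimal sketch of that pattern (struct ufs_hba comes from the driver's ufshcd.h; the example_* names are illustrative):

	#include <linux/device.h>
	#include <linux/sysfs.h>
	#include "ufshcd.h"

	/* Illustrative read-only attribute in the same style as the group above. */
	static ssize_t example_value_show(struct device *dev,
					  struct device_attribute *attr, char *buf)
	{
		struct ufs_hba *hba = dev_get_drvdata(dev);

		return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
	}
	static DEVICE_ATTR_RO(example_value);

	static struct attribute *example_attrs[] = {
		&dev_attr_example_value.attr,
		NULL
	};

	static const struct attribute_group example_group = {
		.name	= "monitor",
		.attrs	= example_attrs,
	};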
static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
u8 desc_index,
@@ -297,10 +533,10 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
@@ -678,7 +914,7 @@ static ssize_t _name##_show(struct device *dev, \
up(&hba->host_sem); \
return -ENOMEM; \
} \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
0, 0, desc_buf, &desc_len); \
@@ -695,7 +931,7 @@ static ssize_t _name##_show(struct device *dev, \
goto out; \
ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
kfree(desc_buf); \
up(&hba->host_sem); \
return ret; \
@@ -724,8 +960,8 @@ static const struct attribute_group ufs_sysfs_string_descriptors_group = {
static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
- return ((idn >= QUERY_FLAG_IDN_WB_EN) &&
- (idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8));
+ return idn >= QUERY_FLAG_IDN_WB_EN &&
+ idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}
#define UFS_FLAG(_name, _uname) \
@@ -744,10 +980,10 @@ static ssize_t _name##_show(struct device *dev, \
} \
if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
QUERY_FLAG_IDN##_uname, index, &flag); \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
@@ -793,8 +1029,8 @@ static const struct attribute_group ufs_sysfs_flags_group = {
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
- return ((idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS) &&
- (idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE));
+ return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
+ idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
#define UFS_ATTRIBUTE(_name, _uname) \
@@ -813,10 +1049,10 @@ static ssize_t _name##_show(struct device *dev, \
} \
if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
- pm_runtime_get_sync(hba->dev); \
+ ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
QUERY_ATTR_IDN##_uname, index, 0, &value); \
- pm_runtime_put_sync(hba->dev); \
+ ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
@@ -881,6 +1117,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
+ &ufs_sysfs_monitor_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
&ufs_sysfs_geometry_descriptor_group,
@@ -964,10 +1201,10 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
goto out;
}
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index 5b2bc1a6f922..39bf204c6ec3 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -97,7 +97,7 @@ static int ufs_bsg_request(struct bsg_job *job)
bsg_reply->reply_payload_rcv_len = 0;
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
msgcode = bsg_request->msgcode;
switch (msgcode) {
@@ -106,7 +106,7 @@ static int ufs_bsg_request(struct bsg_job *job)
ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
&desc_len, desc_op);
if (ret) {
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
goto out;
}
@@ -138,7 +138,7 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
if (!desc_buff)
goto out;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 23ee828747e2..e6c334bfb4c2 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -410,29 +410,6 @@ static int ufshcd_pci_resume(struct device *dev)
return ufshcd_system_resume(dev_get_drvdata(dev));
}
-/**
- * ufshcd_pci_poweroff - suspend-to-disk poweroff function
- * @dev: pointer to PCI device handle
- *
- * Returns 0 if successful
- * Returns non-zero otherwise
- */
-static int ufshcd_pci_poweroff(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int spm_lvl = hba->spm_lvl;
- int ret;
-
- /*
- * For poweroff we need to set the UFS device to PowerDown mode.
- * Force spm_lvl to ensure that.
- */
- hba->spm_lvl = 5;
- ret = ufshcd_system_suspend(hba);
- hba->spm_lvl = spm_lvl;
- return ret;
-}
-
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -533,17 +510,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = ufshcd_pci_suspend,
- .resume = ufshcd_pci_resume,
- .freeze = ufshcd_pci_suspend,
- .thaw = ufshcd_pci_resume,
- .poweroff = ufshcd_pci_poweroff,
- .restore = ufshcd_pci_resume,
-#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+#endif
};
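The ufshcd-pci dev_pm_ops now relies on SET_SYSTEM_SLEEP_PM_OPS() instead of listing each sleep callback (which also retires the poweroff handler removed above) and adds .prepare/.complete under CONFIG_PM_SLEEP; the same prepare/complete pair is wired into every UFS platform glue driver touched by this series. A compact sketch of the resulting ops table; the example_* callbacks are placeholders, while ufshcd_suspend_prepare/ufshcd_resume_complete are the hooks named in the hunks and are assumed to be declared in ufshcd.h:

	#include <linux/pm.h>

	static int example_suspend(struct device *dev);
	static int example_resume(struct device *dev);
	static int example_runtime_suspend(struct device *dev);
	static int example_runtime_resume(struct device *dev);
	static int example_runtime_idle(struct device *dev);

	static const struct dev_pm_ops example_pm_ops = {
		SET_RUNTIME_PM_OPS(example_runtime_suspend,
				   example_runtime_resume,
				   example_runtime_idle)
		SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	#ifdef CONFIG_PM_SLEEP
		.prepare	= ufshcd_suspend_prepare,
		.complete	= ufshcd_resume_complete,
	#endif
	};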
static const struct pci_device_id ufshcd_pci_tbl[] = {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 72fd41bfbd54..b87ff68aa9aa 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -16,6 +16,7 @@
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
+#include <scsi/scsi_driver.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -24,6 +25,7 @@
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
+#include "../sd.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -77,6 +79,8 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -157,17 +161,17 @@ enum {
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+ [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+ [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+ [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
/*
* For DeepSleep, the link is first put in hibern8 and then off.
* Leaving the link in hibern8 is not supported.
*/
- {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
+ [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
@@ -298,11 +302,17 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ struct utp_upiu_header *header;
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
+ if (str_t == UFS_CMD_SEND)
+ header = &rq->header;
+ else
+ header = &hba->lrb[tag].ucd_rsp_ptr->header;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -361,41 +371,40 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- sector_t lba = -1;
+ u64 lba = -1;
u8 opcode = 0, group_id = 0;
u32 intr, doorbell;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
int transfer_len = -1;
+ if (!cmd)
+ return;
+
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
- if (cmd)
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
return;
}
- if (cmd) { /* data phase exists */
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
- opcode = cmd->cmnd[0];
- if ((opcode == READ_10) || (opcode == WRITE_10)) {
- /*
- * Currently we only fully trace read(10) and write(10)
- * commands
- */
- if (cmd->request && cmd->request->bio)
- lba = cmd->request->bio->bi_iter.bi_sector;
- transfer_len = be32_to_cpu(
- lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
- if (opcode == WRITE_10)
- group_id = lrbp->cmd->cmnd[6];
- } else if (opcode == UNMAP) {
- if (cmd->request) {
- lba = scsi_get_lba(cmd);
- transfer_len = blk_rq_bytes(cmd->request);
- }
- }
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
+ opcode = cmd->cmnd[0];
+ lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+
+ if (opcode == READ_10 || opcode == WRITE_10) {
+ /*
+ * Currently we only fully trace read(10) and write(10) commands
+ */
+ transfer_len =
+ be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ if (opcode == WRITE_10)
+ group_id = lrbp->cmd->cmnd[6];
+ } else if (opcode == UNMAP) {
+ /*
+ * The number of Bytes to be unmapped beginning with the lba.
+ */
+ transfer_len = blk_rq_bytes(cmd->request);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
@@ -755,7 +764,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
*/
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
- __clear_bit(tag, &hba->outstanding_reqs);
+ clear_bit(tag, &hba->outstanding_reqs);
}
/**
@@ -1551,7 +1560,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value == hba->clk_scaling.is_enabled)
goto out;
- pm_runtime_get_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba, false);
hba->clk_scaling.is_enabled = value;
@@ -1567,7 +1576,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
}
ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return err ? err : count;
@@ -1981,15 +1990,19 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
+ unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
return;
+ }
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
@@ -2005,22 +2018,91 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+ unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.active_reqs--;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static inline int ufshcd_monitor_opcode2dir(u8 opcode)
+{
+ if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
+ return READ;
+ else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
+ return WRITE;
+ else
+ return -EINVAL;
+}
+
+static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct ufs_hba_monitor *m = &hba->monitor;
+
+ return (m->enabled && lrbp && lrbp->cmd &&
+ (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
+ ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
+}
+
+static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
+ hba->monitor.busy_start_ts[dir] = ktime_get();
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
+ struct request *req = lrbp->cmd->request;
+ struct ufs_hba_monitor *m = &hba->monitor;
+ ktime_t now, inc, lat;
+
+ now = lrbp->compl_time_stamp;
+ inc = ktime_sub(now, m->busy_start_ts[dir]);
+ m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
+ m->nr_sec_rw[dir] += blk_rq_sectors(req);
+
+ /* Update latencies */
+ m->nr_req[dir]++;
+ lat = ktime_sub(now, lrbp->issue_time_stamp);
+ m->lat_sum[dir] += lat;
+ if (m->lat_max[dir] < lat || !m->lat_max[dir])
+ m->lat_max[dir] = lat;
+ if (m->lat_min[dir] > lat || !m->lat_min[dir])
+ m->lat_min[dir] = lat;
+
+ m->nr_queued[dir]--;
+ /* Push forward the busy start of monitor */
+ m->busy_start_ts[dir] = now;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
}
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
@@ -2036,8 +2118,21 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_start_monitor(hba, lrbp);
+ if (ufshcd_has_utrlcnr(hba)) {
+ set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
/* Make sure that doorbell is committed immediately */
wmb();
}
@@ -2565,6 +2660,17 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
+static inline bool is_rpmb_wlun(struct scsi_device *sdev)
+{
+ return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
+}
+
+static inline bool is_device_wlun(struct scsi_device *sdev)
+{
+ return sdev->lun ==
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
+}
+
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
@@ -2597,7 +2703,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufshcd_lrb *lrbp;
struct ufs_hba *hba;
- unsigned long flags;
int tag;
int err = 0;
@@ -2614,6 +2719,43 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED_FATAL:
+ /*
+ * pm_runtime_get_sync() is used at error handling preparation
+ * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
+ * PM ops, it can never be finished if we let SCSI layer keep
+ * retrying it, which gets err handler stuck forever. Neither
+ * can we let the scsi cmd pass through, because UFS is in bad
+ * state, the scsi cmd may eventually time out, which will get
+ * err handler blocked for too long. So, just fail the scsi cmd
+ * sent from PM ops, err handler can recover PM error anyways.
+ */
+ if (hba->pm_op_in_progress) {
+ hba->force_reset = true;
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out;
+ }
+ fallthrough;
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ cmd->scsi_done(cmd);
+ goto out;
+ }
+
hba->req_abort_count = 0;
err = ufshcd_hold(hba, true);
@@ -2624,8 +2766,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
(hba->clk_gating.state != CLKS_ON));
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
if (hba->pm_op_in_progress)
set_host_byte(cmd, DID_BAD_TARGET);
else
@@ -2634,6 +2775,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->sense_bufflen = UFS_SENSE_SIZE;
@@ -2657,51 +2799,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- switch (hba->ufshcd_state) {
- case UFSHCD_STATE_OPERATIONAL:
- case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
- break;
- case UFSHCD_STATE_EH_SCHEDULED_FATAL:
- /*
- * pm_runtime_get_sync() is used at error handling preparation
- * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
- * PM ops, it can never be finished if we let SCSI layer keep
- * retrying it, which gets err handler stuck forever. Neither
- * can we let the scsi cmd pass through, because UFS is in bad
- * state, the scsi cmd may eventually time out, which will get
- * err handler blocked for too long. So, just fail the scsi cmd
- * sent from PM ops, err handler can recover PM error anyways.
- */
- if (hba->pm_op_in_progress) {
- hba->force_reset = true;
- set_host_byte(cmd, DID_BAD_TARGET);
- goto out_compl_cmd;
- }
- fallthrough;
- case UFSHCD_STATE_RESET:
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out_compl_cmd;
- case UFSHCD_STATE_ERROR:
- set_host_byte(cmd, DID_ERROR);
- goto out_compl_cmd;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- goto out_compl_cmd;
- }
ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto out;
-
-out_compl_cmd:
- scsi_dma_unmap(lrbp->cmd);
- lrbp->cmd = NULL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
- if (!err)
- cmd->scsi_done(cmd);
out:
up_read(&hba->clk_scaling_lock);
return err;
@@ -2735,7 +2833,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
- * wait for for h/w to clear corresponding bit in door-bell.
+ * wait for h/w to clear corresponding bit in door-bell.
* max. wait is 1 sec.
*/
err = ufshcd_wait_for_register(hba,
@@ -2856,7 +2954,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
int err;
int tag;
struct completion wait;
- unsigned long flags;
down_read(&hba->clk_scaling_lock);
@@ -2876,34 +2973,30 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
req->timeout = msecs_to_jiffies(2 * timeout);
blk_mq_start_request(req);
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
- goto out_put_tag;
+ goto out;
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
-
-out:
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-out_put_tag:
+out:
blk_put_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
@@ -4101,12 +4194,13 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (update && !pm_runtime_suspended(hba->dev)) {
- pm_runtime_get_sync(hba->dev);
+ if (update &&
+ !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
+ ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba, false);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
- pm_runtime_put(hba->dev);
+ ufshcd_rpm_put_sync(hba);
}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
@@ -4420,7 +4514,7 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
*/
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+void ufshcd_hba_stop(struct ufs_hba *hba)
{
unsigned long flags;
int err;
@@ -4439,6 +4533,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
+EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
* ufshcd_hba_execute_hce - initialize the controller
@@ -4804,6 +4899,43 @@ static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
}
/**
+ * ufshcd_setup_links - associate link b/w device wlun and other luns
+ * @sdev: pointer to SCSI device
+ * @hba: pointer to ufs hba
+ */
+static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
+{
+ struct device_link *link;
+
+ /*
+	 * Device wlun is the supplier & the rest of the luns are consumers.
+ * This ensures that device wlun suspends after all other luns.
+ */
+ if (hba->sdev_ufs_device) {
+ link = device_link_add(&sdev->sdev_gendev,
+ &hba->sdev_ufs_device->sdev_gendev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
+ dev_name(&hba->sdev_ufs_device->sdev_gendev));
+ return;
+ }
+ hba->luns_avail--;
+ /* Ignore REPORT_LUN wlun probing */
+ if (hba->luns_avail == 1) {
+ ufshcd_rpm_put(hba);
+ return;
+ }
+ } else {
+ /*
+ * Device wlun is probed. The assumption is that WLUNs are
+ * scanned before other LUNs.
+ */
+ hba->luns_avail--;
+ }
+}
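As an aside, the supplier/consumer ordering set up above comes from the driver core's device-link machinery rather than from UFS-specific code. The sketch below is illustrative only; consumer_dev and supplier_dev are hypothetical stand-ins for a regular LUN's and the device WLUN's sdev_gendev, and none of it is part of this patch.

	struct device_link *link;

	/*
	 * DL_FLAG_PM_RUNTIME orders runtime PM across the two devices:
	 * resuming the consumer resumes the supplier first, and the
	 * supplier may only suspend after all consumers have suspended.
	 * DL_FLAG_RPM_ACTIVE creates the link with the supplier treated
	 * as runtime-active.
	 */
	link = device_link_add(consumer_dev, supplier_dev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		dev_err(consumer_dev, "failed to link to %s\n",
			dev_name(supplier_dev));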
+
+/**
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
@@ -4834,6 +4966,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
ufshcd_get_lu_power_on_wp_status(hba, sdev);
+ ufshcd_setup_links(hba, sdev);
+
return 0;
}
@@ -4865,8 +4999,13 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
-
- if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ /*
+ * Block runtime-pm until all consumers are added.
+	 * Refer to ufshcd_setup_links().
+ */
+ if (is_device_wlun(sdev))
+ pm_runtime_get_noresume(&sdev->sdev_gendev);
+ else if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
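The pm_runtime_get_noresume() call above for the device WLUN pairs with the ufshcd_rpm_put() in ufshcd_setup_links() once the other LUNs have been linked (see the luns_avail handling earlier). A minimal sketch of the pattern, placement illustrative only:

	/* In ->slave_configure() of the device WLUN: pin it without resuming. */
	pm_runtime_get_noresume(&sdev->sdev_gendev);

	/*
	 * Later, when the last expected consumer link has been created,
	 * drop that reference so the WLUN may runtime-suspend again.
	 */
	ufshcd_rpm_put(hba);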
@@ -4982,15 +5121,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
*/
if (!hba->pm_op_in_progress &&
!ufshcd_eh_in_progress(hba) &&
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
- schedule_work(&hba->eeh_work)) {
- /*
- * Prevent suspend once eeh_work is scheduled
- * to avoid deadlock between ufshcd_suspend
- * and exception event handler.
- */
- pm_runtime_get_noresume(hba->dev);
- }
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ /* Flushed in suspend */
+ schedule_work(&hba->eeh_work);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5037,6 +5170,24 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
return result;
}
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
+ u32 intr_mask)
+{
+ if (!ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_auto_hibern8_enabled(hba))
+ return false;
+
+ if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
+ return false;
+
+ if (hba->active_uic_cmd &&
+ (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
+ hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
+ return false;
+
+ return true;
+}
+
/**
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
@@ -5050,6 +5201,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
+ spin_lock(hba->host->host_lock);
+ if (ufshcd_is_auto_hibern8_error(hba, intr_status))
+ hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
@@ -5070,6 +5225,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
UFS_CMD_COMP);
+ spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5088,11 +5244,14 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ if (!test_and_clear_bit(index, &hba->outstanding_reqs))
+ continue;
lrbp = &hba->lrb[index];
- lrbp->in_use = false;
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
@@ -5101,7 +5260,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->cmd = NULL;
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
- __ufshcd_release(hba);
+ ufshcd_release(hba);
update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
@@ -5112,28 +5271,23 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
update_scaling = true;
}
}
- if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
- hba->clk_scaling.active_reqs--;
+ if (update_scaling)
+ ufshcd_clk_scaling_update_busy(hba);
}
-
- /* clear corresponding bits of completed commands */
- hba->outstanding_reqs ^= completed_reqs;
-
- ufshcd_clk_scaling_update_busy(hba);
}
/**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_trc_handler - handle transfer requests completion
* @hba: per adapter instance
+ * @use_utrlcnr: get completed requests from UTRLCNR
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
{
- unsigned long completed_reqs;
- u32 tr_doorbell;
+ unsigned long completed_reqs = 0;
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5146,8 +5300,24 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+ if (use_utrlcnr) {
+ u32 utrlcnr;
+
+ utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
+ if (utrlcnr) {
+ ufshcd_writel(hba, utrlcnr,
+ REG_UTP_TRANSFER_REQ_LIST_COMPL);
+ completed_reqs = utrlcnr;
+ }
+ } else {
+ unsigned long flags;
+ u32 tr_doorbell;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
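	/*
	 * Worked example (illustrative values only): if outstanding_reqs
	 * is 0b1011 and the doorbell register now reads 0b0001, the XOR
	 * above yields completed_reqs == 0b1010, i.e. exactly the slots
	 * the controller has finished and cleared. On UTRLCNR-capable
	 * hosts the completed set is instead read straight from the
	 * completion notification register and acknowledged by writing
	 * the same value back.
	 */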
if (completed_reqs) {
__ufshcd_transfer_req_compl(hba, completed_reqs);
@@ -5589,8 +5759,8 @@ static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
* after a certain delay to recheck the threshold by next runtime
* suspend.
*/
- pm_runtime_get_sync(hba->dev);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_rpm_get_sync(hba);
+ ufshcd_rpm_put_sync(hba);
}
/**
@@ -5607,7 +5777,6 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
u32 status = 0;
hba = container_of(work, struct ufs_hba, eeh_work);
- pm_runtime_get_sync(hba->dev);
ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
@@ -5624,21 +5793,13 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufs_debugfs_exception_event(hba, status);
out:
ufshcd_scsi_unblock_requests(hba);
- /*
- * pm_runtime_get_noresume is called while scheduling
- * eeh_work to avoid suspend racing with exception work.
- * Hence decrement usage counter using pm_runtime_put_noidle
- * to allow suspend on completion of exception event handler.
- */
- pm_runtime_put_noidle(hba->dev);
- pm_runtime_put(hba->dev);
return;
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_transfer_req_compl(hba);
+ ufshcd_trc_handler(hba, false);
ufshcd_tmc_handler(hba);
}
@@ -5756,12 +5917,13 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
- pm_runtime_get_sync(hba->dev);
- if (pm_runtime_status_suspended(hba->dev) || hba->is_sys_suspended) {
+ ufshcd_rpm_get_sync(hba);
+ if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
+ hba->is_sys_suspended) {
enum ufs_pm_op pm_op;
/*
- * Don't assume anything of pm_runtime_get_sync(), if
+	 * Don't assume anything about resume; if
* resume fails, irq and clocks can be OFF, and powers
* can be OFF or in LPM.
*/
@@ -5797,12 +5959,13 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
ufshcd_clear_ua_wluns(hba);
- pm_runtime_put(hba->dev);
+ ufshcd_rpm_put(hba);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
return (!hba->is_powered || hba->shutting_down ||
+ !hba->sdev_ufs_device ||
hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
ufshcd_is_link_broken(hba))));
@@ -5818,14 +5981,18 @@ static void ufshcd_recover_pm_error(struct ufs_hba *hba)
hba->is_sys_suspended = false;
/*
- * Set RPM status of hba device to RPM_ACTIVE,
+ * Set RPM status of wlun device to RPM_ACTIVE,
* this also clears its runtime error.
*/
- ret = pm_runtime_set_active(hba->dev);
+ ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
+
+ /* hba device might have a runtime error otherwise */
+ if (ret)
+ ret = pm_runtime_set_active(hba->dev);
/*
- * If hba device had runtime error, we also need to resume those
- * scsi devices under hba in case any of them has failed to be
- * resumed due to hba runtime resume failure. This is to unblock
+ * If wlun device had runtime error, we also need to resume those
+ * consumer scsi devices in case any of them has failed to be
+ * resumed due to supplier runtime resume failure. This is to unblock
* blk_queue_enter in case there are bios waiting inside it.
*/
if (!ret) {
@@ -5887,13 +6054,11 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_prepare(hba);
+ /* Complete requests that have door-bell cleared by h/w */
+ ufshcd_complete_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_RESET;
-
- /* Complete requests that have door-bell cleared by h/w */
- ufshcd_complete_requests(hba);
-
/*
* A full reset and restore might have happened after preparation
* is finished, double check whether we should stop.
@@ -5976,12 +6141,11 @@ static void ufshcd_err_handler(struct work_struct *work)
}
lock_skip_pending_xfer_clear:
- spin_lock_irqsave(hba->host->host_lock, flags);
-
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba);
- hba->silence_err_logs = false;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->silence_err_logs = false;
if (err_xfer || err_tm) {
needs_reset = true;
goto do_reset;
@@ -6014,19 +6178,6 @@ lock_skip_pending_xfer_clear:
do_reset:
/* Fatal errors need reset */
if (needs_reset) {
- unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
-
- /*
- * ufshcd_reset_and_restore() does the link reinitialization
- * which will need atleast one empty doorbell slot to send the
- * device management commands (NOP and query commands).
- * If there is no slot empty at this moment then free up last
- * slot forcefully.
- */
- if (hba->outstanding_reqs == max_doorbells)
- __ufshcd_transfer_req_compl(hba,
- (1UL << (hba->nutrs - 1)));
-
hba->force_reset = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
@@ -6144,37 +6295,23 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
return retval;
}
-static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
- u32 intr_mask)
-{
- if (!ufshcd_is_auto_hibern8_supported(hba) ||
- !ufshcd_is_auto_hibern8_enabled(hba))
- return false;
-
- if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
- return false;
-
- if (hba->active_uic_cmd &&
- (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
- hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
- return false;
-
- return true;
-}
-
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ * @intr_status: interrupt status generated by the controller
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(hba->host->host_lock);
+ hba->errors |= UFSHCD_ERROR_MASK & intr_status;
+
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
hba->errors);
@@ -6229,6 +6366,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ hba->errors = 0;
+ hba->uic_error = 0;
+ spin_unlock(hba->host->host_lock);
return retval;
}
@@ -6263,13 +6403,17 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
+ unsigned long flags;
struct request_queue *q = hba->tmf_queue;
struct ctm_info ci = {
.hba = hba,
- .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
};
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}
@@ -6286,22 +6430,17 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
- hba->errors = UFSHCD_ERROR_MASK & intr_status;
-
- if (ufshcd_is_auto_hibern8_error(hba, intr_status))
- hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
-
- if (hba->errors)
- retval |= ufshcd_check_errors(hba);
-
if (intr_status & UFSHCD_UIC_MASK)
retval |= ufshcd_uic_cmd_compl(hba, intr_status);
+ if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
+ retval |= ufshcd_check_errors(hba, intr_status);
+
if (intr_status & UTP_TASK_REQ_COMPL)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
return retval;
}
@@ -6322,7 +6461,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
- spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
hba->ufs_stats.last_intr_ts = ktime_get();
@@ -6344,7 +6482,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
}
if (enabled_intr_status && retval == IRQ_NONE &&
- !ufshcd_eh_in_progress(hba)) {
+ (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
+ hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
@@ -6353,7 +6492,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
- spin_unlock(hba->host->host_lock);
return retval;
}
@@ -6530,7 +6668,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
int err = 0;
int tag;
struct completion wait;
- unsigned long flags;
u8 upiu_flags;
down_read(&hba->clk_scaling_lock);
@@ -6543,13 +6680,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
- init_completion(&wait);
- lrbp = &hba->lrb[tag];
- if (unlikely(lrbp->in_use)) {
+ if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
err = -EBUSY;
goto out;
}
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
lrbp->cmd = NULL;
lrbp->sense_bufflen = 0;
@@ -6585,12 +6722,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -6616,6 +6752,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
err = -EINVAL;
}
}
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
blk_put_request(req);
@@ -6709,7 +6847,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
u32 pos;
int err;
u8 resp = 0xF, lun;
- unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
@@ -6728,11 +6865,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
+ __ufshcd_transfer_req_compl(hba, pos);
}
}
- spin_lock_irqsave(host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- spin_unlock_irqrestore(host->host_lock, flags);
out:
hba->req_abort_count = 0;
@@ -6909,20 +7044,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will fail, due to spec violation, scsi err handling next step
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
- * the lrb taken by this cmd and mark the lrb as in_use, then
- * queue the eh_work and bail.
+ * the lrb taken by this cmd and re-set it in outstanding_reqs,
+ * then queue the eh_work and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
+ __ufshcd_transfer_req_compl(hba, (1UL << tag));
+ set_bit(tag, &hba->outstanding_reqs);
spin_lock_irqsave(host->host_lock, flags);
- if (lrbp->cmd) {
- __ufshcd_transfer_req_compl(hba, (1UL << tag));
- __set_bit(tag, &hba->outstanding_reqs);
- lrbp->in_use = true;
- hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
- }
-
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
goto out;
}
@@ -6935,9 +7066,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
if (!err) {
cleanup:
- spin_lock_irqsave(host->host_lock, flags);
__ufshcd_transfer_req_compl(hba, (1UL << tag));
- spin_unlock_irqrestore(host->host_lock, flags);
out:
err = SUCCESS;
} else {
@@ -6967,19 +7096,15 @@ out:
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
int err;
- unsigned long flags;
/*
* Stop the host controller and complete the requests
* cleared by h/w
*/
ufshcd_hba_stop(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
ufshcd_set_clk_freq(hba, true);
@@ -7249,7 +7374,6 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
hba->sdev_ufs_device = NULL;
goto out;
}
- ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
scsi_device_put(hba->sdev_ufs_device);
hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
@@ -7413,6 +7537,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
+ desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
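	/*
	 * Illustrative arithmetic (values assumed, not from this patch):
	 * a device reporting bNumberLU == 3 and bNumberWLU == 4 starts
	 * with luns_avail == 7. ufshcd_setup_links() decrements it for
	 * each LUN/W-LU that is probed, and the blocking PM reference on
	 * the device W-LU is dropped once only the REPORT LUNS W-LU,
	 * which is not exposed as a SCSI device, remains.
	 */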
+
ufs_fixup_device_setup(hba);
ufshcd_wb_probe(hba, desc_buf);
@@ -7890,6 +8017,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
hba->wlun_dev_clr_ua = true;
+ hba->wlun_rpmb_clr_ua = true;
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
@@ -8475,7 +8603,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- ufshcd_clear_ua_wluns(hba);
+ if (hba->wlun_dev_clr_ua)
+ ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
cmd[4] = pwr_mode << 4;
@@ -8490,7 +8619,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (driver_byte(ret) == DRIVER_SENSE)
+ if (ret > 0 && scsi_sense_valid(&sshdr))
scsi_print_sense_hdr(sdp, NULL, &sshdr);
}
@@ -8650,23 +8779,7 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
ufshcd_setup_hba_vreg(hba, true);
}
-/**
- * ufshcd_suspend - helper function for suspend operations
- * @hba: per adapter instance
- * @pm_op: desired low power operation type
- *
- * This function will try to put the UFS device and link into low power
- * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
- * (System PM level).
- *
- * If this function is called during shutdown, it will make sure that
- * both UFS device and UFS link is powered off.
- *
- * NOTE: UFS device & link must be active before we enter in this function.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret = 0;
int check_for_bkops;
@@ -8674,9 +8787,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
enum ufs_dev_pwr_mode req_dev_pwr_mode;
enum uic_link_state req_link_state;
- hba->pm_op_in_progress = 1;
- if (!ufshcd_is_shutdown_pm(pm_op)) {
- pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+ hba->pm_op_in_progress = true;
+ if (pm_op != UFS_SHUTDOWN_PM) {
+ pm_lvl = pm_op == UFS_RUNTIME_PM ?
hba->rpm_lvl : hba->spm_lvl;
req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
@@ -8697,20 +8810,20 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
- goto disable_clks;
+ goto vops_suspend;
}
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto enable_gating;
+ goto enable_scaling;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto enable_gating;
+ goto enable_scaling;
}
- if (ufshcd_is_runtime_pm(pm_op)) {
+ if (pm_op == UFS_RUNTIME_PM) {
if (ufshcd_can_autobkops_during_suspend(hba)) {
/*
* The device is idle with no requests in the queue,
@@ -8719,7 +8832,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_urgent_bkops(hba);
if (ret)
- goto enable_gating;
+ goto enable_scaling;
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
@@ -8740,14 +8853,14 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
flush_work(&hba->eeh_work);
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
- if (!ufshcd_is_runtime_pm(pm_op))
+ if (pm_op != UFS_RUNTIME_PM)
/* ensure that bkops is disabled */
ufshcd_disable_auto_bkops(hba);
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
if (ret)
- goto enable_gating;
+ goto enable_scaling;
}
}
@@ -8760,7 +8873,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_dev_active;
-disable_clks:
+vops_suspend:
/*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space call them before the
@@ -8769,28 +8882,9 @@ disable_clks:
ret = ufshcd_vops_suspend(hba, pm_op);
if (ret)
goto set_link_active;
- /*
- * Disable the host irq as host controller as there won't be any
- * host controller transaction expected till resume.
- */
- ufshcd_disable_irq(hba);
-
- ufshcd_setup_clocks(hba, false);
-
- if (ufshcd_is_clkgating_allowed(hba)) {
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- }
-
- ufshcd_vreg_set_lpm(hba);
-
- /* Put the host controller in low power mode if possible */
- ufshcd_hba_vreg_set_lpm(hba);
goto out;
set_link_active:
- ufshcd_vreg_set_hpm(hba);
/*
* Device hardware reset is required to exit DeepSleep. Also, for
* DeepSleep, the link is off so host reset and restore will be done
@@ -8812,57 +8906,32 @@ set_dev_active:
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
-enable_gating:
+enable_scaling:
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
- hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
- ufshcd_clear_ua_wluns(hba);
- ufshcd_release(hba);
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
}
- hba->pm_op_in_progress = 0;
-
- if (ret)
- ufshcd_update_evt_hist(hba, UFS_EVT_SUSPEND_ERR, (u32)ret);
+ if (ret) {
+ ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+ }
+ hba->pm_op_in_progress = false;
return ret;
}
-/**
- * ufshcd_resume - helper function for resume operations
- * @hba: per adapter instance
- * @pm_op: runtime PM or system PM
- *
- * This function basically brings the UFS device, UniPro link and controller
- * to active state.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret;
- enum uic_link_state old_link_state;
-
- hba->pm_op_in_progress = 1;
- old_link_state = hba->uic_link_state;
-
- ufshcd_hba_vreg_set_hpm(hba);
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto out;
-
- /* Make sure clocks are enabled before accessing controller */
- ret = ufshcd_setup_clocks(hba, true);
- if (ret)
- goto disable_vreg;
+ enum uic_link_state old_link_state = hba->uic_link_state;
- /* enable the host irq as host controller would be active soon */
- ufshcd_enable_irq(hba);
+ hba->pm_op_in_progress = true;
/*
* Call vendor specific resume callback. As these callbacks may access
@@ -8871,7 +8940,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_vops_resume(hba, pm_op);
if (ret)
- goto disable_irq_and_vops_clks;
+ goto out;
/* For DeepSleep, the only supported option is to have the link off */
WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
@@ -8919,42 +8988,217 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
- hba->clk_gating.is_suspended = false;
-
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
if (hba->dev_info.b_rpm_dev_flush_capable) {
hba->dev_info.b_rpm_dev_flush_capable = false;
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
- ufshcd_clear_ua_wluns(hba);
-
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
goto out;
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
-disable_irq_and_vops_clks:
+out:
+ if (ret)
+ ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
+ hba->clk_gating.is_suspended = false;
+ ufshcd_release(hba);
+ hba->pm_op_in_progress = false;
+ return ret;
+}
+
+static int ufshcd_wl_runtime_suspend(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+
+ trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+static int ufshcd_wl_runtime_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+
+ trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_wl_suspend(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+ down(&hba->host_sem);
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+
+ ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
+ if (ret) {
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+ up(&hba->host_sem);
+ }
+
+out:
+ if (!ret)
+ hba->is_sys_suspended = true;
+ trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+
+ return ret;
+}
+
+static int ufshcd_wl_resume(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ hba = shost_priv(sdev->host);
+
+ if (pm_runtime_suspended(dev))
+ goto out;
+
+ ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
+ if (ret)
+ dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
+out:
+ trace_ufshcd_wl_resume(dev_name(dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
+ up(&hba->host_sem);
+ return ret;
+}
+#endif
+
+static void ufshcd_wl_shutdown(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba;
+
+ hba = shost_priv(sdev->host);
+
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
+ /* Turn on everything while shutting down */
+ ufshcd_rpm_get_sync(hba);
+ scsi_device_quiesce(sdev);
+ shost_for_each_device(sdev, hba->host) {
+ if (sdev == hba->sdev_ufs_device)
+ continue;
+ scsi_device_quiesce(sdev);
+ }
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ *
+ * This function disables irqs, turns off clocks and puts
+ * vreg and hba-vreg into LPM mode.
+ */
+static int ufshcd_suspend(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+ /*
+	 * Disable the host irq as there won't be any host controller
+	 * transaction expected till resume.
+ */
ufshcd_disable_irq(hba);
- ufshcd_setup_clocks(hba, false);
+ ret = ufshcd_setup_clocks(hba, false);
+ if (ret) {
+ ufshcd_enable_irq(hba);
+ return ret;
+ }
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
+
+ ufshcd_vreg_set_lpm(hba);
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
+ return ret;
+}
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ *
+ * This function basically turns on the regulators, clocks and
+ * irqs of the hba.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!hba->is_powered)
+ return 0;
+
+ ufshcd_hba_vreg_set_hpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto out;
+
+ /* Make sure clocks are enabled before accessing controller */
+ ret = ufshcd_setup_clocks(hba, true);
+ if (ret)
+ goto disable_vreg;
+
+ /* enable the host irq as host controller would be active soon */
+ ufshcd_enable_irq(hba);
+ goto out;
+
disable_vreg:
ufshcd_vreg_set_lpm(hba);
out:
- hba->pm_op_in_progress = 0;
if (ret)
ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
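With the suspend path split this way, device/link power management and controller housekeeping live at different levels. A hedged sketch of the runtime-suspend ordering this implies (an interpretation of the patch, not text from it):

	/*
	 * ordinary LUNs (consumers)    suspend first;
	 * ufs_device_wlun (supplier)   then ufshcd_wl_runtime_suspend()
	 *                                -> __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
	 * hba parent device            finally ufshcd_runtime_suspend(hba)
	 *                                -> ufshcd_suspend(hba)  (irq, clocks, vregs)
	 */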
@@ -8973,44 +9217,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- down(&hba->host_sem);
-
- if (!hba->is_powered)
- return 0;
-
- cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
-
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) &&
- (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
- hba->uic_link_state) &&
- pm_runtime_suspended(hba->dev) &&
- !hba->dev_info.b_rpm_dev_flush_capable)
+ if (pm_runtime_suspended(hba->dev))
goto out;
- if (pm_runtime_suspended(hba->dev)) {
- /*
- * UFS device and/or UFS link low power states during runtime
- * suspend seems to be different than what is expected during
- * system suspend. Hence runtime resume the devic & link and
- * let the system suspend low power states to take effect.
- * TODO: If resume takes longer time, we might have optimize
- * it in future by not resuming everything if possible.
- */
- ret = ufshcd_runtime_resume(hba);
- if (ret)
- goto out;
- }
-
- ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+ ret = ufshcd_suspend(hba);
out:
trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = true;
- else
- up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -9027,21 +9241,16 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba->is_powered || pm_runtime_suspended(hba->dev))
- /*
- * Let the runtime resume take care of resuming
- * if runtime suspended.
- */
+ if (pm_runtime_suspended(hba->dev))
goto out;
- else
- ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+
+ ret = ufshcd_resume(hba);
+
out:
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = false;
- up(&hba->host_sem);
+
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9056,14 +9265,11 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
ktime_t start = ktime_get();
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
-out:
+ ret = ufshcd_suspend(hba);
+
trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9075,33 +9281,19 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
* ufshcd_runtime_resume - runtime resume routine
* @hba: per adapter instance
*
- * This function basically brings the UFS device, UniPro link and controller
+ * This function basically brings the controller
* to active state. Following operations are done in this function:
*
* 1. Turn on all the controller related clocks
- * 2. Bring the UniPro link out of Hibernate state
- * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
- * to active state.
- * 4. If auto-bkops is enabled on the device, disable it.
- *
- * So following would be the possible power state after this function return
- * successfully:
- * S1: UFS device in Active state with VCC rail ON
- * UniPro link in Active state
- * All the UFS/UniPro controller clocks are ON
- *
- * Returns 0 for success and non-zero for failure
+ * 2. Turn ON VCC rail
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- int ret = 0;
+ int ret;
ktime_t start = ktime_get();
- if (!hba->is_powered)
- goto out;
- else
- ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
-out:
+ ret = ufshcd_resume(hba);
+
trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9119,30 +9311,20 @@ EXPORT_SYMBOL(ufshcd_runtime_idle);
* ufshcd_shutdown - shutdown routine
* @hba: per adapter instance
*
- * This function would power off both UFS device and UFS link.
+ * This function turns off both UFS device and UFS hba
+ * regulators. It also disables clocks.
*
* Returns 0 always to allow force shutdown even in case of errors.
*/
int ufshcd_shutdown(struct ufs_hba *hba)
{
- int ret = 0;
-
- down(&hba->host_sem);
- hba->shutting_down = true;
- up(&hba->host_sem);
-
- if (!hba->is_powered)
- goto out;
-
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
goto out;
pm_runtime_get_sync(hba->dev);
- ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+ ufshcd_suspend(hba);
out:
- if (ret)
- dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba->is_powered = false;
/* allow force shutdown even in case of errors */
return 0;
@@ -9156,6 +9338,8 @@ EXPORT_SYMBOL(ufshcd_shutdown);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ if (hba->sdev_ufs_device)
+ ufshcd_rpm_get_sync(hba);
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
@@ -9459,15 +9643,180 @@ out_error:
}
EXPORT_SYMBOL_GPL(ufshcd_init);
+void ufshcd_resume_complete(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->complete_put) {
+ ufshcd_rpm_put(hba);
+ hba->complete_put = false;
+ }
+ if (hba->rpmb_complete_put) {
+ ufshcd_rpmb_rpm_put(hba);
+ hba->rpmb_complete_put = false;
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
+
+int ufshcd_suspend_prepare(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * SCSI assumes that runtime-pm and system-pm for scsi drivers
+	 * are the same, and it doesn't wake up the device for system-suspend
+	 * if it's runtime suspended. UFS doesn't follow that.
+	 * Refer to ufshcd_resume_complete().
+ */
+ if (hba->sdev_ufs_device) {
+ ret = ufshcd_rpm_get_sync(hba);
+ if (ret < 0 && ret != -EACCES) {
+ ufshcd_rpm_put(hba);
+ return ret;
+ }
+ hba->complete_put = true;
+ }
+ if (hba->sdev_rpmb) {
+ ufshcd_rpmb_rpm_get_sync(hba);
+ hba->rpmb_complete_put = true;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
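These two exported hooks are meant to be wired into a UFS glue driver's dev_pm_ops. A minimal sketch, assuming hypothetical wrappers example_glue_suspend()/example_glue_resume() and example_glue_runtime_suspend()/example_glue_runtime_resume() around the existing ufshcd_system_*()/ufshcd_runtime_*() helpers (none of these example_* names come from this patch):

	static const struct dev_pm_ops example_glue_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(example_glue_suspend, example_glue_resume)
		SET_RUNTIME_PM_OPS(example_glue_runtime_suspend,
				   example_glue_runtime_resume, NULL)
		.prepare	= ufshcd_suspend_prepare,
		.complete	= ufshcd_resume_complete,
	};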
+
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_wl_poweroff(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
+ return 0;
+}
+#endif
+
+static int ufshcd_wl_probe(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!is_device_wlun(sdev))
+ return -ENODEV;
+
+ blk_pm_runtime_init(sdev->request_queue, dev);
+ pm_runtime_set_autosuspend_delay(dev, 0);
+ pm_runtime_allow(dev);
+
+ return 0;
+}
+
+static int ufshcd_wl_remove(struct device *dev)
+{
+ pm_runtime_forbid(dev);
+ return 0;
+}
+
+static const struct dev_pm_ops ufshcd_wl_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_wl_suspend,
+ .resume = ufshcd_wl_resume,
+ .freeze = ufshcd_wl_suspend,
+ .thaw = ufshcd_wl_resume,
+ .poweroff = ufshcd_wl_poweroff,
+ .restore = ufshcd_wl_resume,
+#endif
+ SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
+};
+
+/*
+ * ufs_dev_wlun_template - describes ufs device wlun
+ * ufs-device wlun - used to send pm commands
+ * All luns are consumers of ufs-device wlun.
+ *
+ * Currently, no sd driver is present for wluns.
+ * Hence no specific pm operations are performed.
+ * With ufs design, SSU should be sent to ufs-device wlun.
+ * Hence register a scsi driver for ufs wluns only.
+ */
+static struct scsi_driver ufs_dev_wlun_template = {
+ .gendrv = {
+ .name = "ufs_device_wlun",
+ .owner = THIS_MODULE,
+ .probe = ufshcd_wl_probe,
+ .remove = ufshcd_wl_remove,
+ .pm = &ufshcd_wl_pm_ops,
+ .shutdown = ufshcd_wl_shutdown,
+ },
+};
+
+static int ufshcd_rpmb_probe(struct device *dev)
+{
+ return is_rpmb_wlun(to_scsi_device(dev)) ? 0 : -ENODEV;
+}
+
+static inline int ufshcd_clear_rpmb_uac(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ if (!hba->wlun_rpmb_clr_ua)
+ return 0;
+ ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
+ if (!ret)
+ hba->wlun_rpmb_clr_ua = 0;
+ return ret;
+}
+
+static int ufshcd_rpmb_resume(struct device *dev)
+{
+ struct ufs_hba *hba = wlun_dev_to_hba(dev);
+
+ if (hba->sdev_rpmb)
+ ufshcd_clear_rpmb_uac(hba);
+ return 0;
+}
+
+static const struct dev_pm_ops ufs_rpmb_pm_ops = {
+ SET_RUNTIME_PM_OPS(NULL, ufshcd_rpmb_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, ufshcd_rpmb_resume)
+};
+
+/* ufs_rpmb_wlun_template - Describes UFS RPMB WLUN. Used only to send UAC. */
+static struct scsi_driver ufs_rpmb_wlun_template = {
+ .gendrv = {
+ .name = "ufs_rpmb_wlun",
+ .owner = THIS_MODULE,
+ .probe = ufshcd_rpmb_probe,
+ .pm = &ufs_rpmb_pm_ops,
+ },
+};
+
static int __init ufshcd_core_init(void)
{
+ int ret;
+
ufs_debugfs_init();
- return 0;
+
+ ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
+ if (ret)
+ goto debugfs_exit;
+
+ ret = scsi_register_driver(&ufs_rpmb_wlun_template.gendrv);
+ if (ret)
+ goto unregister;
+
+ return ret;
+unregister:
+ scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
+debugfs_exit:
+ ufs_debugfs_exit();
+ return ret;
}
static void __exit ufshcd_core_exit(void)
{
ufs_debugfs_exit();
+ scsi_unregister_driver(&ufs_rpmb_wlun_template.gendrv);
+ scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}
module_init(ufshcd_core_init);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 5eb66a8debc7..c98d540ac044 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -72,6 +72,8 @@ enum ufs_event_type {
UFS_EVT_LINK_STARTUP_FAIL,
UFS_EVT_RESUME_ERR,
UFS_EVT_SUSPEND_ERR,
+ UFS_EVT_WL_SUSP_ERR,
+ UFS_EVT_WL_RES_ERR,
/* abnormal events */
UFS_EVT_DEV_RESET,
@@ -106,10 +108,6 @@ enum ufs_pm_op {
UFS_SHUTDOWN_PM,
};
-#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
-#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
-#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
-
/* Host <-> Device UniPro Link state */
enum uic_link_state {
UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
@@ -157,13 +155,13 @@ enum uic_link_state {
* power off.
*/
enum ufs_pm_level {
- UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
- UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
- UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
- UFS_PM_LVL_6, /* UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE */
+ UFS_PM_LVL_0,
+ UFS_PM_LVL_1,
+ UFS_PM_LVL_2,
+ UFS_PM_LVL_3,
+ UFS_PM_LVL_4,
+ UFS_PM_LVL_5,
+ UFS_PM_LVL_6,
UFS_PM_LVL_MAX
};
@@ -195,7 +193,6 @@ struct ufs_pm_lvl_states {
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
- * @in_use: indicates that this lrb is still in use
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -225,7 +222,6 @@ struct ufshcd_lrb {
#endif
bool req_abort_skip;
- bool in_use;
};
/**
@@ -645,6 +641,25 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
+struct ufs_hba_monitor {
+ unsigned long chunk_size;
+
+ unsigned long nr_sec_rw[2];
+ ktime_t total_busy[2];
+
+ unsigned long nr_req[2];
+	/* latencies */
+ ktime_t lat_sum[2];
+ ktime_t lat_max[2];
+ ktime_t lat_min[2];
+
+ u32 nr_queued[2];
+ ktime_t busy_start_ts[2];
+
+ ktime_t enabled_ts;
+ bool enabled;
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -807,6 +822,7 @@ struct ufs_hba {
struct list_head clk_list_head;
bool wlun_dev_clr_ua;
+ bool wlun_rpmb_clr_ua;
/* Number of requests aborts */
int req_abort_count;
@@ -835,6 +851,8 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
+ struct ufs_hba_monitor monitor;
+
#ifdef CONFIG_SCSI_UFS_CRYPTO
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;
@@ -846,6 +864,9 @@ struct ufs_hba {
struct delayed_work debugfs_ee_work;
u32 debugfs_ee_rate_limit_ms;
#endif
+ u32 luns_avail;
+ bool complete_put;
+ bool rpmb_complete_put;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -936,7 +957,7 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
-int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
@@ -947,6 +968,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
+void ufshcd_hba_stop(struct ufs_hba *hba);
static inline void check_upiu_size(void)
{
@@ -1105,6 +1127,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_suspend_prepare(struct device *dev);
+void ufshcd_resume_complete(struct device *dev);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -1136,6 +1160,11 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
+static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
+{
+ return (hba->ufs_version >= ufshci_version(3, 0));
+}
+
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
bool up, enum ufs_notify_change_status status)
{
@@ -1309,4 +1338,29 @@ static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
&hba->ee_drv_mask, set, clr);
}
+static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_put(struct ufs_hba *hba)
+{
+ return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpmb_rpm_get_sync(struct ufs_hba *hba)
+{
+ return pm_runtime_get_sync(&hba->sdev_rpmb->sdev_gendev);
+}
+
+static inline int ufshcd_rpmb_rpm_put(struct ufs_hba *hba)
+{
+ return pm_runtime_put(&hba->sdev_rpmb->sdev_gendev);
+}
+
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index de95be5d11d4..5affb1fce5ad 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,6 +39,7 @@ enum {
REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
+ REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64,
REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index b9c86a7e3b97..fd69a03d6137 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -161,8 +161,7 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
min_t(u32,
virtio32_to_cpu(vscsi->vdev, resp->sense_len),
VIRTIO_SCSI_SENSE_SIZE));
- if (resp->sense_len)
- set_driver_byte(sc, DRIVER_SENSE);
+ set_status_byte(sc, SAM_STAT_CHECK_CONDITION);
}
sc->scsi_done(sc);
@@ -355,7 +354,7 @@ static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
if (result == 0 && inq_result[0] >> 5) {
/* PQ indicates the LUN is not attached */
scsi_remove_device(sdev);
- } else if (host_byte(result) == DID_BAD_TARGET) {
+ } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
/*
* If all LUNs of a virtio-scsi device are unplugged
* it will respond with BAD TARGET on any INQUIRY
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index b9969fce6b4d..ce1ba1b93629 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -576,9 +576,6 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
cmd->result = (DID_RESET << 16);
} else {
cmd->result = (DID_OK << 16) | sdstat;
- if (sdstat == SAM_STAT_CHECK_CONDITION &&
- cmd->sense_buffer)
- cmd->result |= (DRIVER_SENSE << 24);
}
} else
switch (btstat) {
@@ -610,9 +607,6 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
case BTSTAT_LUNMISMATCH:
case BTSTAT_TAGREJECT:
case BTSTAT_BADMSG:
- cmd->result = (DRIVER_INVALID << 24);
- fallthrough;
-
case BTSTAT_HAHARDWARE:
case BTSTAT_INVPHASE:
case BTSTAT_HATIMEOUT:
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index a23277bb870e..4468bc45aaa4 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1176,13 +1176,13 @@ wd33c93_intr(struct Scsi_Host *instance)
if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
cmd->SCp.Status = lun;
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD)
- cmd->result =
- (cmd->
- result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result =
- cmd->SCp.Status | (cmd->SCp.Message << 8);
+ && cmd->SCp.Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ set_status_byte(cmd, cmd->SCp.Status);
+ }
cmd->scsi_done(cmd);
/* We are no longer connected to a target - check to see if
@@ -1262,11 +1262,14 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != SAM_STAT_GOOD)
- cmd->result =
- (cmd->result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
+ if (cmd->cmnd[0] == REQUEST_SENSE &&
+ cmd->SCp.Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ set_status_byte(cmd, cmd->SCp.Status);
+ }
cmd->scsi_done(cmd);
/* We are no longer connected to a target - check to see if
@@ -1295,14 +1298,14 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
DB(DB_INTR, printk(":%d", cmd->SCp.Status))
- if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD)
- cmd->result =
- (cmd->
- result & 0x00ffff) | (DID_ERROR << 16);
- else
- cmd->result =
- cmd->SCp.Status | (cmd->SCp.Message << 8);
+ if (cmd->cmnd[0] == REQUEST_SENSE
+ && cmd->SCp.Status != SAM_STAT_GOOD) {
+ set_host_byte(cmd, DID_ERROR);
+ } else {
+ set_host_byte(cmd, DID_OK);
+ scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ set_status_byte(cmd, cmd->SCp.Status);
+ }
cmd->scsi_done(cmd);
break;
case S_PRE_TMP_DISC:
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 259fc248d06c..ec9d399fbbd8 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -251,6 +251,7 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
+ int result;
id = ring_rsp->rqid;
shadow = info->shadow[id];
@@ -261,7 +262,12 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
scsifront_gnttab_done(info, shadow);
scsifront_put_rqid(info, id);
- sc->result = ring_rsp->rslt;
+ result = ring_rsp->rslt;
+ if (result >> 24)
+ set_host_byte(sc, DID_ERROR);
+ else
+ set_host_byte(sc, host_byte(result));
+ set_status_byte(sc, result & 0xff);
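	/*
	 * Illustrative note (not part of this patch): the backend still
	 * reports a legacy-format result word, with the status byte in
	 * bits 0-7, the host byte in bits 16-23 and the old driver byte
	 * in bits 24-31. Since the driver byte no longer exists on the
	 * frontend side, any non-zero value there is folded into
	 * DID_ERROR above.
	 */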
scsi_set_resid(sc, ring_rsp->residual_len);
sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index af35251232eb..b044999ad002 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -265,12 +265,13 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
if (ccmd->release) {
- struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
-
- if (ttinfo->sgl) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ put_page(sg_page(&ccmd->sg));
+ } else {
struct cxgbit_sock *csk = conn->context;
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
/* Abort the TCP conn if DDP is not complete to
* avoid any possibility of DDP after freeing
@@ -280,14 +281,14 @@ void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
cmd->se_cmd.data_length))
cxgbit_abort_conn(csk);
+ if (unlikely(ttinfo->sgl)) {
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+ ttinfo->nents, DMA_FROM_DEVICE);
+ ttinfo->nents = 0;
+ ttinfo->sgl = NULL;
+ }
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
-
- dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
- ttinfo->nents, DMA_FROM_DEVICE);
- } else {
- put_page(sg_page(&ccmd->sg));
}
-
ccmd->release = false;
}
}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index b926e1d6c7b8..282297ffc404 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -997,17 +997,18 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
struct scatterlist *sg_start;
struct iscsi_conn *conn = csk->conn;
struct iscsi_cmd *cmd = NULL;
+ struct cxgbit_cmd *ccmd;
+ struct cxgbi_task_tag_info *ttinfo;
struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
u32 data_offset = be32_to_cpu(hdr->offset);
- u32 data_len = pdu_cb->dlen;
+ u32 data_len = ntoh24(hdr->dlength);
int rc, sg_nents, sg_off;
bool dcrc_err = false;
if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
u32 offset = be32_to_cpu(hdr->offset);
u32 ddp_data_len;
- u32 payload_length = ntoh24(hdr->dlength);
bool success = false;
cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
@@ -1022,7 +1023,7 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
cmd->data_sn = be32_to_cpu(hdr->datasn);
rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
- cmd, payload_length, &success);
+ cmd, data_len, &success);
if (rc < 0)
return rc;
else if (!success)
@@ -1060,6 +1061,20 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
}
+ ccmd = iscsit_priv_cmd(cmd);
+ ttinfo = &ccmd->ttinfo;
+
+ if (ccmd->release && ttinfo->sgl &&
+ (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
+ DMA_FROM_DEVICE);
+ ttinfo->nents = 0;
+ ttinfo->sgl = NULL;
+ }
+
check_payload:
rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
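Editor's note: the new block above tears down the DDP DMA mapping as soon as the final Data-Out burst of a command arrives, instead of waiting for cxgbit_unmap_cmd(). A restated sketch of the completion test, assuming the same field names as the hunk; the helper is illustrative only:

static bool cxgbit_dataout_completes_cmd(struct iscsi_cmd *cmd, u32 burst_len)
{
	/* all previously received data plus this burst equals the expected length */
	return cmd->se_cmd.data_length == cmd->write_data_done + burst_len;
}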
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index cd670cb9b8fb..0dd52f484fec 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -871,8 +871,6 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
if (iscsit_execute_cmd(cmd, 1) < 0)
return -1;
-
- continue;
}
return ooo_count;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 151e2949bb75..c0ed6f8e5c5b 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -118,13 +118,6 @@ static u32 iscsi_handle_authentication(
" CHAP auth\n");
return -1;
}
- iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
- if (!iscsi_nacl) {
- pr_err("Unable to locate struct iscsi_node_acl for"
- " CHAP auth\n");
- return -1;
- }
if (se_nacl->dynamic_node_acl) {
iscsi_tpg = container_of(se_nacl->se_tpg,
@@ -1082,14 +1075,12 @@ int iscsi_target_locate_portal(
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
- tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+ tmpbuf = kmemdup_nul(login->req_buf, payload_length, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate memory for tmpbuf.\n");
return -1;
}
- memcpy(tmpbuf, login->req_buf, payload_length);
- tmpbuf[payload_length] = '\0';
start = tmpbuf;
end = (start + payload_length);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 7a461fbb1566..6bc3aaf655fc 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1357,14 +1357,12 @@ int iscsi_decode_text_input(
struct iscsi_param_list *param_list = conn->param_list;
char *tmpbuf, *start = NULL, *end = NULL;
- tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+ tmpbuf = kmemdup_nul(textbuf, length, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
return -ENOMEM;
}
- memcpy(tmpbuf, textbuf, length);
- tmpbuf[length] = '\0';
start = tmpbuf;
end = (start + length);
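Editor's note: both conversions above (here and in iscsi_target_nego.c) replace an open-coded allocate/copy/NUL-terminate sequence with kmemdup_nul(), which allocates len + 1 bytes, copies len bytes and terminates the result. Equivalence sketch, using the names from this hunk:

	/* before */
	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;
	memcpy(tmpbuf, textbuf, length);
	tmpbuf[length] = '\0';

	/* after: one call, same NUL-terminated copy */
	tmpbuf = kmemdup_nul(textbuf, length, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;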
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 2687fd7d45db..6d0b0e67e79e 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -566,7 +566,6 @@ static int tcm_loop_queue_data_or_status(const char *func,
memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
- set_driver_byte(sc, DRIVER_SENSE);
} else
sc->result = scsi_status;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index ce84f93c183a..4d3ceee23622 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1261,7 +1261,6 @@ static int sbp_rw_data(struct sbp_target_request *req)
pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
if (pg_size) {
pr_err("sbp_run_transaction: page size ignored\n");
- pg_size = 0x100 << pg_size;
}
spin_lock_bh(&sess->lock);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 5517c7dd5144..3bb921345bce 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -123,7 +123,7 @@ target_emulate_report_referrals(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -255,7 +255,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
}
transport_kunmap_data_sg(cmd);
- target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
return 0;
}
@@ -424,7 +424,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_data_sg(cmd);
if (!rc)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return rc;
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 4b2e49341ad6..102ec644bc8a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1479,6 +1479,54 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
return count;
}
+static ssize_t
+target_wwn_company_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%#08x\n",
+ to_t10_wwn(item)->company_id);
+}
+
+static ssize_t
+target_wwn_company_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ u32 val;
+ int ret;
+
+ /*
+ * The IEEE COMPANY_ID field should contain a 24-bit canonical
+ * form OUI assigned by the IEEE.
+ */
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= 0x1000000)
+ return -EOVERFLOW;
+
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set Company ID while %u exports exist\n",
+ dev->export_count);
+ return -EINVAL;
+ }
+
+ t10_wwn->company_id = val;
+
+ pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
+ t10_wwn->company_id);
+
+ return count;
+}
+
/*
* VPD page 0x80 Unit serial
*/
@@ -1625,6 +1673,7 @@ DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
CONFIGFS_ATTR(target_wwn_, vendor_id);
CONFIGFS_ATTR(target_wwn_, product_id);
CONFIGFS_ATTR(target_wwn_, revision);
+CONFIGFS_ATTR(target_wwn_, company_id);
CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
@@ -1635,6 +1684,7 @@ static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
&target_wwn_attr_vendor_id,
&target_wwn_attr_product_id,
&target_wwn_attr_revision,
+ &target_wwn_attr_company_id,
&target_wwn_attr_vpd_unit_serial,
&target_wwn_attr_vpd_protocol_identifier,
&target_wwn_attr_vpd_assoc_logical_unit,
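Editor's note: the new company_id attribute is driven through configfs like the neighbouring vpd_unit_serial file. A minimal userspace sketch, assuming configfs is mounted at /sys/kernel/config and a backstore named iblock_0/lun0 already exists (both path components are examples, not part of the patch); because the store handler uses kstrtou32() with base 0, hex, octal and decimal input are all accepted:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/config/target/core/iblock_0/lun0/wwn/company_id";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* must be a 24-bit canonical-form IEEE OUI; larger values return -EOVERFLOW */
	fprintf(f, "0x001405\n");
	fclose(f);
	return 0;
}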
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index a8df9f0a82fa..8cb1fa0c0585 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -773,6 +773,11 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_alua.lba_map_lock);
dev->t10_wwn.t10_dev = dev;
+ /*
+ * Use OpenFabrics IEEE Company ID: 00 14 05
+ */
+ dev->t10_wwn.company_id = 0x001405;
+
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index a526f9678c34..44d9d028f716 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -474,7 +474,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 6fd5fec95539..4b94b085625b 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -234,7 +234,7 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -297,7 +297,7 @@ out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -3676,7 +3676,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
}
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -4073,7 +4073,7 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
}
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index a31c93e4e19c..b793c99637ab 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -52,7 +52,7 @@
/*
* Function defined in target_core_spc.c
*/
-void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+void spc_gen_naa_6h_vendor_specific(struct se_device *, unsigned char *);
extern struct kmem_cache *t10_pr_reg_cache;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index f2a11414366d..1646ba8eda11 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1044,7 +1044,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
struct se_cmd *cmd = req->end_io_data;
struct pscsi_plugin_task *pt = cmd->priv;
int result = scsi_req(req)->result;
- u8 scsi_status = status_byte(result) << 1;
+ enum sam_status scsi_status = result & 0xff;
if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 7b07e557dc8d..b32f4ee88e79 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -67,7 +67,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, 8);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8);
return 0;
}
@@ -130,7 +130,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, 32);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 32);
return 0;
}
@@ -202,14 +202,14 @@ sbc_execute_write_same_unmap(struct se_cmd *cmd)
return ret;
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -1245,7 +1245,7 @@ sbc_execute_unmap(struct se_cmd *cmd)
err:
transport_kunmap_data_sg(cmd);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 70a661801cb9..22703a0dbd07 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -47,10 +47,32 @@ static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
spin_unlock(&lun->lun_tg_pt_gp_lock);
}
+static u16
+spc_find_scsi_transport_vd(int proto_id)
+{
+ switch (proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ return SCSI_VERSION_DESCRIPTOR_FCP4;
+ case SCSI_PROTOCOL_ISCSI:
+ return SCSI_VERSION_DESCRIPTOR_ISCSI;
+ case SCSI_PROTOCOL_SAS:
+ return SCSI_VERSION_DESCRIPTOR_SAS3;
+ case SCSI_PROTOCOL_SBP:
+ return SCSI_VERSION_DESCRIPTOR_SBP3;
+ case SCSI_PROTOCOL_SRP:
+ return SCSI_VERSION_DESCRIPTOR_SRP;
+ default:
+ pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
+ " transport PROTOCOL IDENTIFIER %#x\n", proto_id);
+ return 0;
+ }
+}
+
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
struct se_lun *lun = cmd->se_lun;
+ struct se_portal_group *tpg = lun->lun_tpg;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
@@ -58,7 +80,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
buf[1] = 0x80;
- buf[2] = 0x05; /* SPC-3 */
+ buf[2] = 0x06; /* SPC-4 */
/*
* NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
@@ -108,7 +130,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
memcpy(&buf[32], dev->t10_wwn.revision,
strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
- buf[4] = 31; /* Set additional length to 31 */
+
+ /*
+ * Set the VERSION DESCRIPTOR fields
+ */
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
+ put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
+ if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);
+
+ buf[4] = 91; /* Set additional length to 91 */
return 0;
}
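Editor's note: ADDITIONAL LENGTH in byte 4 of standard INQUIRY data counts the bytes that follow byte 4, so raising it from 31 to 91 grows the response from 36 to 96 bytes, enough to cover the version descriptors now written at bytes 58-65. A trivial standalone check of that arithmetic (offsets restated from the hunk above, not from the kernel source):

#include <assert.h>

int main(void)
{
	/* ADDITIONAL LENGTH = total response length - 5 */
	assert(31 + 5 == 36);	/* old: data ended after the REVISION field at byte 35 */
	assert(91 + 5 == 96);	/* new: covers the SAM-5, transport, SPC-4 and SBC-3 descriptors */
	return 0;
}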
@@ -129,14 +161,29 @@ spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
- unsigned char *buf)
+/*
+ * Generate NAA IEEE Registered Extended designator
+ */
+void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
+ unsigned char *buf)
{
unsigned char *p = &dev->t10_wwn.unit_serial[0];
- int cnt;
+ u32 company_id = dev->t10_wwn.company_id;
+ int cnt, off = 0;
bool next = true;
/*
+ * Start NAA IEEE Registered Extended Identifier/Designator
+ */
+ buf[off] = 0x6 << 4;
+
+ /* IEEE COMPANY_ID */
+ buf[off++] |= (company_id >> 20) & 0xf;
+ buf[off++] = (company_id >> 12) & 0xff;
+ buf[off++] = (company_id >> 4) & 0xff;
+ buf[off] = (company_id & 0xf) << 4;
+
+ /*
* Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
* byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
* format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
@@ -144,7 +191,7 @@ void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
* per device uniqueness.
*/
- for (cnt = 0; *p && cnt < 13; p++) {
+ for (cnt = off + 13; *p && off < cnt; p++) {
int val = hex_to_bin(*p);
if (val < 0)
@@ -152,10 +199,10 @@ void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
if (next) {
next = false;
- buf[cnt++] |= val;
+ buf[off++] |= val;
} else {
next = true;
- buf[cnt] = val << 4;
+ buf[off] = val << 4;
}
}
}
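Editor's note: the rewritten helper above builds the 4-byte NAA-6 header from the configurable company_id instead of hard-coding the OpenFabrics OUI. A standalone restatement (illustration only, not the kernel function) showing that the default 0x001405 reproduces the bytes the old evpd_83/xcopy code emitted before OR'ing in the serial-number nibbles:

#include <stdio.h>

static void naa6_header(unsigned char *buf, unsigned int company_id)
{
	buf[0]  = 0x6 << 4;			/* NAA = 6, IEEE Registered Extended */
	buf[0] |= (company_id >> 20) & 0xf;	/* OUI bits 23..20 */
	buf[1]  = (company_id >> 12) & 0xff;	/* OUI bits 19..12 */
	buf[2]  = (company_id >> 4) & 0xff;	/* OUI bits 11..4  */
	buf[3]  = (company_id & 0xf) << 4;	/* OUI bits 3..0 in the high nibble */
}

int main(void)
{
	unsigned char buf[4] = { 0 };

	naa6_header(buf, 0x001405);		/* default OpenFabrics OUI */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	/* prints: 60 01 40 50 -- same prefix as the old hard-coded path */
	return 0;
}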
@@ -203,24 +250,8 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
/* Identifier/Designator length */
buf[off++] = 0x10;
- /*
- * Start NAA IEEE Registered Extended Identifier/Designator
- */
- buf[off++] = (0x6 << 4);
-
- /*
- * Use OpenFabrics IEEE Company ID: 00 14 05
- */
- buf[off++] = 0x01;
- buf[off++] = 0x40;
- buf[off] = (0x5 << 4);
-
- /*
- * Return ConfigFS Unit Serial Number information for
- * VENDOR_SPECIFIC_IDENTIFIER and
- * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
- */
- spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+ /* NAA IEEE Registered Extended designator */
+ spc_gen_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
@@ -750,7 +781,7 @@ out:
kfree(buf);
if (!ret)
- target_complete_cmd_with_length(cmd, GOOD, len);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
return ret;
}
@@ -1104,7 +1135,7 @@ set_length:
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, length);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
return 0;
}
@@ -1122,7 +1153,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
int i;
if (!cmd->data_length) {
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -1165,7 +1196,7 @@ out:
transport_kunmap_data_sg(cmd);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -1198,7 +1229,7 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -1265,7 +1296,7 @@ done:
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);
@@ -1273,7 +1304,7 @@ EXPORT_SYMBOL(spc_emulate_report_luns);
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4bba10e7755a..fbb6ffaddfbe 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -121,7 +121,7 @@ struct tcmu_dev {
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
-#define TCM_DEV_BIT_PLUGGED 4
+#define TCMU_DEV_BIT_PLUGGED 4
unsigned long flags;
struct uio_info uio_info;
@@ -982,7 +982,7 @@ static void tcmu_unplug_device(struct se_dev_plug *se_plug)
struct se_device *se_dev = se_plug->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
- clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
+ clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
uio_event_notify(&udev->uio_info);
}
@@ -990,7 +990,7 @@ static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
{
struct tcmu_dev *udev = TCMU_DEV(se_dev);
- if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
return &udev->se_plug;
return NULL;
@@ -1124,7 +1124,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
- if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+ if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
uio_event_notify(&udev->uio_info);
return 0;
@@ -1423,7 +1423,7 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
pr_err("ring broken, not handling completions\n");
- return 0;
+ return false;
}
mb = udev->mb_addr;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index d31ed071cb08..0f1319336f3e 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -33,19 +33,6 @@ static struct workqueue_struct *xcopy_wq = NULL;
static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
-static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
-{
- int off = 0;
-
- buf[off++] = (0x6 << 4);
- buf[off++] = 0x01;
- buf[off++] = 0x40;
- buf[off] = (0x5 << 4);
-
- spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
- return 0;
-}
-
/**
* target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
*
@@ -65,7 +52,7 @@ static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
}
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
- target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+ spc_gen_naa_6h_vendor_specific(se_dev, &tmp_dev_wwn[0]);
rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
@@ -241,7 +228,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
* se_device the XCOPY was received upon..
*/
memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
- target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);
+ spc_gen_naa_6h_vendor_specific(local_dev, &xop->local_dev_wwn[0]);
while (start < tdll) {
/*
@@ -1011,7 +998,7 @@ static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
put_unaligned_be32(42, &p[0]);
transport_kunmap_data_sg(se_cmd);
- target_complete_cmd(se_cmd, GOOD);
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
return TCM_NO_SENSE;
}
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index a6f3267bbef6..2f7093ba5a2f 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -221,11 +221,11 @@ static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
desc[12] = regs[6]; /* device */
desc[13] = regs[7]; /* command */
- srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ srb->result = SAM_STAT_CHECK_CONDITION;
}
goto end;
invalid_fld:
- srb->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer,
usb_stor_sense_invalidCDB,
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 55a4763da05e..61ce0d142eea 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -222,10 +222,10 @@ static void scsiback_print_status(char *sense_buffer, int errors,
{
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
- pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
+ pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x\n",
tpg->tport->tport_name, pending_req->v2p->lun,
- pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
- host_byte(errors), driver_byte(errors));
+ pending_req->cmnd[0], errors & 0xff, COMMAND_COMPLETE,
+ host_byte(errors));
}
static void scsiback_fast_flush_area(struct vscsibk_pend *req)
@@ -719,10 +719,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
result = DID_NO_CONNECT;
break;
default:
- result = DRIVER_ERROR;
+ result = DID_ERROR;
break;
}
- scsiback_send_response(info, NULL, result << 24, 0,
+ scsiback_send_response(info, NULL, result << 16, 0,
ring_req.rqid);
return 1;
}
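Editor's note: these scsiback conversions track the removal of the driver byte; internal failures are now reported through the host byte instead. Layout assumed by the shifts above (legacy Linux SCSI result word); the macro below is a hypothetical name for illustration, not something the patch adds:

/*
 * bits  0..7   SAM status byte
 * bits  8..15  message byte (legacy; printed as COMMAND_COMPLETE above)
 * bits 16..23  host byte (DID_*)
 * bits 24..31  driver byte (removed by this series)
 */
#define SCSIBACK_INTERNAL_ERROR		(DID_ERROR << 16)	/* was DRIVER_ERROR << 24 */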
@@ -732,7 +732,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(NULL,
- DRIVER_ERROR << 24, 0, pending_req);
+ DID_ERROR << 16, 0, pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
} else {
scsiback_cmd_exec(pending_req);
@@ -747,7 +747,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info,
break;
default:
pr_err_ratelimited("invalid request\n");
- scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
+ scsiback_do_resp_with_sense(NULL, DID_ERROR << 16, 0,
pending_req);
transport_generic_free_cmd(&pending_req->se_cmd, 0);
break;
@@ -1401,8 +1401,7 @@ static int scsiback_queue_status(struct se_cmd *se_cmd)
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
- pending_req->result = (DRIVER_SENSE << 24) |
- SAM_STAT_CHECK_CONDITION;
+ pending_req->result = SAM_STAT_CHECK_CONDITION;
else
pending_req->result = se_cmd->scsi_status;