author    Linus Torvalds <torvalds@linux-foundation.org>  2010-02-26 16:55:27 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-02-26 16:55:27 -0800
commit    654451748b779b28077d9058442d0f354251870d (patch)
tree      ff889a2f6226e16b1121789f809927666a9ccf13 /drivers
parent    64d497f55379b1e320a08ec2426468d96f5642ec (diff)
parent    77c9cfc51b0d732b2524799810fb30018074fd60 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (158 commits)
  [SCSI] Fix printing of failed 32-byte commands
  [SCSI] Fix printing of variable length commands
  [SCSI] libsrp: fix bug in ADDITIONAL CDB LENGTH interpretation
  [SCSI] scsi_dh_alua: Add IBM Power Virtual SCSI ALUA device to dev list
  [SCSI] scsi_dh_alua: add netapp to dev list
  [SCSI] qla2xxx: Update version number to 8.03.02-k1.
  [SCSI] qla2xxx: EEH: Restore PCI saved state during pci slot reset.
  [SCSI] qla2xxx: Add firmware ETS burst support.
  [SCSI] qla2xxx: Correct loop-resync issues during SNS scans.
  [SCSI] qla2xxx: Correct use-after-free issue in terminate_rport_io callback.
  [SCSI] qla2xxx: Correct EH bus-reset handling.
  [SCSI] qla2xxx: Proper clean-up of BSG requests when request times out.
  [SCSI] qla2xxx: Initialize payload receive length in failure path of vendor commands
  [SCSI] fix duplicate removal on error path in scsi_sysfs_add_sdev
  [SCSI] fix refcounting bug in scsi_get_host_dev
  [SCSI] fix memory leak in scsi_report_lun_scan
  [SCSI] lpfc: correct PPC build failure
  [SCSI] raid_class: add raid1e
  [SCSI] mpt2sas: Do not call sas_is_tlr_enabled for RAID volumes.
  [SCSI] zfcp: Introduce header file for qdio structs and inline functions
  ...
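The 158 commits pulled in by this merge can be listed locally from the IDs in the header above (a sketch, assuming a kernel clone that contains both objects; the two-dot range excludes history already reachable from the mainline parent):

$ git log --oneline 64d497f55379..654451748b77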
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/message/fusion/mptbase.c | 5
-rw-r--r--  drivers/message/fusion/mptbase.h | 4
-rw-r--r--  drivers/message/fusion/mptctl.c | 4
-rw-r--r--  drivers/message/fusion/mptfc.c | 17
-rw-r--r--  drivers/message/fusion/mptsas.c | 211
-rw-r--r--  drivers/message/fusion/mptscsih.c | 9
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 90
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 11
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 34
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 114
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 36
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 163
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 50
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 109
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h | 183
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 38
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 37
-rw-r--r--  drivers/scsi/FlashPoint.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 21
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 88
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 14
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 136
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.h | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 488
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 27
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 139
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 5
-rw-r--r--  drivers/scsi/constants.c | 20
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 17
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_offload.c | 7
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_pdu.c | 6
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 2
-rw-r--r--  drivers/scsi/eata.c | 2
-rw-r--r--  drivers/scsi/esp_scsi.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 4
-rw-r--r--  drivers/scsi/fnic/vnic_devcmd.h | 2
-rw-r--r--  drivers/scsi/gdth.c | 430
-rw-r--r--  drivers/scsi/gdth.h | 952
-rw-r--r--  drivers/scsi/gdth_ioctl.h | 366
-rw-r--r--  drivers/scsi/gdth_proc.c | 42
-rw-r--r--  drivers/scsi/gdth_proc.h | 4
-rw-r--r--  drivers/scsi/hpsa.c | 793
-rw-r--r--  drivers/scsi/hpsa.h | 136
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 204
-rw-r--r--  drivers/scsi/ibmmca.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 8
-rw-r--r--  drivers/scsi/libiscsi.c | 53
-rw-r--r--  drivers/scsi/libsrp.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 118
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 2473
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 98
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 145
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 735
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 265
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 547
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 111
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 85
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 46
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 329
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 82
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 7
-rw-r--r--  drivers/scsi/mac_esp.c | 95
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 246
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 36
-rw-r--r--  drivers/scsi/mpt2sas/Kconfig | 1
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2.h | 16
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 25
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_history.txt | 93
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_init.h | 24
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 77
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 6
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 18
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 14
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 51
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 13
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 266
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 196
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 732
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 155
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 33
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 32
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 120
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 110
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 151
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 135
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 14
-rw-r--r--  drivers/scsi/raid_class.c | 1
-rw-r--r--  drivers/scsi/scsi.c | 40
-rw-r--r--  drivers/scsi/scsi_lib.c | 10
-rw-r--r--  drivers/scsi/scsi_sas_internal.h | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 9
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 18
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 26
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 103
-rw-r--r--  drivers/scsi/sd.c | 54
-rw-r--r--  drivers/scsi/ses.c | 10
-rw-r--r--  drivers/scsi/u14-34f.c | 2
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 3
116 files changed, 9988 insertions(+), 3291 deletions(-)
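The diffstat above is the merge relative to its mainline parent, restricted to drivers/. Under the same local-clone assumption as before, it can be regenerated with:

$ git diff --stat 64d497f55379 654451748b77 -- drivers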
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 44d2037e9e56..5382b5a44aff 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -126,8 +126,6 @@ static int mfcounter = 0;
* Public data...
*/
-static struct proc_dir_entry *mpt_proc_root_dir;
-
#define WHOINIT_UNKNOWN 0xAA
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -146,6 +144,9 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *mpt_proc_root_dir;
+#endif
/*
* Driver Callback Index's
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b4948671eb92..9718c8f2e959 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.13"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.13"
+#define MPT_LINUX_VERSION_COMMON "3.04.14"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.14"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 352acd05c46b..caa8f568a41c 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -360,8 +360,8 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
u16 iocstatus;
/* bus reset is only good for SCSI IO, RAID PASSTHRU */
- if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) ||
- (function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
+ if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ function == MPI_FUNCTION_SCSI_IO_REQUEST)) {
dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
"TaskMgmt, not SCSI_IO!!\n", ioc->name));
return -EPERM;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index ebf6ae024da4..612ab3c51a6b 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -195,29 +195,34 @@ mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
unsigned long flags;
int ready;
MPT_ADAPTER *ioc;
+ int loops = 40; /* seconds */
hd = shost_priv(SCpnt->device->host);
ioc = hd->ioc;
spin_lock_irqsave(shost->host_lock, flags);
- while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY) {
+ while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
+ || (loops > 0 && ioc->active == 0)) {
spin_unlock_irqrestore(shost->host_lock, flags);
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"mptfc_block_error_handler.%d: %d:%d, port status is "
- "DID_IMM_RETRY, deferring %s recovery.\n",
+ "%x, active flag %d, deferring %s recovery.\n",
ioc->name, ioc->sh->host_no,
- SCpnt->device->id, SCpnt->device->lun, caller));
+ SCpnt->device->id, SCpnt->device->lun,
+ ready, ioc->active, caller));
msleep(1000);
spin_lock_irqsave(shost->host_lock, flags);
+ loops --;
}
spin_unlock_irqrestore(shost->host_lock, flags);
- if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata) {
+ if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
+ || ioc->active == 0) {
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
"%s.%d: %d:%d, failing recovery, "
- "port state %d, vdevice %p.\n", caller,
+ "port state %x, active %d, vdevice %p.\n", caller,
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun, ready,
- SCpnt->device->hostdata));
+ ioc->active, SCpnt->device->hostdata));
return FAILED;
}
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 83873e3d0ce7..c20bbe45da82 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1075,6 +1075,19 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
return 0;
}
+static void
+mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
+{
+ scsi_device_set_state(sdev, SDEV_BLOCK);
+}
+
+static void
+mptsas_block_io_starget(struct scsi_target *starget)
+{
+ if (starget)
+ starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
+}
+
/**
* mptsas_target_reset_queue
*
@@ -1098,10 +1111,11 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
id = sas_event_data->TargetID;
channel = sas_event_data->Bus;
- if (!(vtarget = mptsas_find_vtarget(ioc, channel, id)))
- return;
-
- vtarget->deleted = 1; /* block IO */
+ vtarget = mptsas_find_vtarget(ioc, channel, id);
+ if (vtarget) {
+ mptsas_block_io_starget(vtarget->starget);
+ vtarget->deleted = 1; /* block IO */
+ }
target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
GFP_ATOMIC);
@@ -1868,7 +1882,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
if (ioc->sas_discovery_quiesce_io)
return SCSI_MLQUEUE_HOST_BUSY;
-// scsi_print_command(SCpnt);
+ if (ioc->debug_level & MPT_DEBUG_SCSI)
+ scsi_print_command(SCpnt);
return mptscsih_qcmd(SCpnt,done);
}
@@ -2686,6 +2701,187 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
return error;
}
+struct rep_manu_request{
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+struct rep_manu_reply{
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * mptsas_exp_repmanufacture_info -
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
+ u64 sas_address, struct sas_expander_device *edev)
+{
+ MPT_FRAME_HDR *mf;
+ SmpPassthroughRequest_t *smpreq;
+ SmpPassthroughReply_t *smprep;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int ret;
+ int flagsLength;
+ unsigned long timeleft;
+ char *psge;
+ unsigned long flags;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ u32 sz;
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
+ if (ret)
+ goto out;
+
+ mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
+ if (!mf) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ smpreq = (SmpPassthroughRequest_t *)mf;
+ memset(smpreq, 0, sizeof(*smpreq));
+
+ sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
+
+ data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
+ if (!data_out) {
+ printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ ret = -ENOMEM;
+ goto put_mf;
+ }
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
+ smpreq->PhysicalPort = 0xFF;
+ *((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
+ smpreq->RequestDataLength = sizeof(struct rep_manu_request);
+
+ psge = (char *)
+ (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
+
+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_HOST_TO_IOC |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= sizeof(struct rep_manu_request);
+
+ ioc->add_sge(psge, flagsLength, data_out_dma);
+ psge += ioc->SGE_size;
+
+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_IOC_TO_HOST |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= sizeof(struct rep_manu_reply);
+ ioc->add_sge(psge, flagsLength, data_out_dma +
+ sizeof(struct rep_manu_request));
+
+ INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
+ mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
+
+ timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ mf = NULL;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out_free;
+ if (!timeleft)
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ goto out_free;
+ }
+
+ mf = NULL;
+
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
+ u8 *tmp;
+
+ smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
+ if (le16_to_cpu(smprep->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out_free;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format;
+ if (manufacture_reply->sas_format) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else {
+ printk(MYIOC_s_ERR_FMT
+ "%s: smp passthru reply failed to be returned\n",
+ ioc->name, __func__);
+ ret = -ENXIO;
+ }
+out_free:
+ if (data_out_dma)
+ pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
+put_mf:
+ if (mf)
+ mpt_free_msg_frame(ioc, mf);
+out_unlock:
+ CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
+ mutex_unlock(&ioc->sas_mgmt.mutex);
+out:
+ return ret;
+ }
+
static void
mptsas_parse_device_info(struct sas_identify *identify,
struct mptsas_devinfo *device_info)
@@ -2967,6 +3163,11 @@ static int mptsas_probe_one_phy(struct device *dev,
goto out;
}
mptsas_set_rphy(ioc, phy_info, rphy);
+ if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mptsas_exp_repmanufacture_info(ioc,
+ identify.sas_address,
+ rphy_to_expander_device(rphy));
}
out:
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 81279b3d694c..4a7d1afcb666 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1438,9 +1438,14 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
&& (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
&& (SCpnt->device->tagged_supported)) {
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
- } else {
+ if (SCpnt->request && SCpnt->request->ioprio) {
+ if (((SCpnt->request->ioprio & 0x7) == 1) ||
+ !(SCpnt->request->ioprio & 0x7))
+ scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
+ }
+ } else
scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
- }
+
/* Use the above information to set up the message frame
*/
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9d0c941b7d33..66d6c01fcf3e 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
/*
@@ -32,6 +32,7 @@
#include <linux/seq_file.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -49,36 +50,6 @@ static struct kmem_cache *zfcp_cache_hw_align(const char *name,
return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
-static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
-{
- int idx;
-
- adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
- GFP_KERNEL);
- if (!adapter->req_list)
- return -ENOMEM;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
- INIT_LIST_HEAD(&adapter->req_list[idx]);
- return 0;
-}
-
-/**
- * zfcp_reqlist_isempty - is the request list empty
- * @adapter: pointer to struct zfcp_adapter
- *
- * Returns: true if list is empty, false otherwise
- */
-int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
-{
- unsigned int idx;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++)
- if (!list_empty(&adapter->req_list[idx]))
- return 0;
- return 1;
-}
-
static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
struct ccw_device *cdev;
@@ -110,7 +81,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
flush_work(&unit->scsi_work);
out_unit:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
out_port:
zfcp_ccw_adapter_put(adapter);
out_ccw_device:
@@ -255,7 +226,7 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
read_lock_irqsave(&port->unit_list_lock, flags);
list_for_each_entry(unit, &port->unit_list, list)
if (unit->fcp_lun == fcp_lun) {
- if (!get_device(&unit->sysfs_device))
+ if (!get_device(&unit->dev))
unit = NULL;
read_unlock_irqrestore(&port->unit_list_lock, flags);
return unit;
@@ -280,7 +251,7 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list)
if (port->wwpn == wwpn) {
- if (!get_device(&port->sysfs_device))
+ if (!get_device(&port->dev))
port = NULL;
read_unlock_irqrestore(&adapter->port_list_lock, flags);
return port;
@@ -298,10 +269,9 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
*/
static void zfcp_unit_release(struct device *dev)
{
- struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
- sysfs_device);
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
- put_device(&unit->port->sysfs_device);
+ put_device(&unit->port->dev);
kfree(unit);
}
@@ -318,11 +288,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
struct zfcp_unit *unit;
int retval = -ENOMEM;
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
unit = zfcp_get_unit_by_lun(port, fcp_lun);
if (unit) {
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
retval = -EEXIST;
goto err_out;
}
@@ -333,10 +303,10 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->port = port;
unit->fcp_lun = fcp_lun;
- unit->sysfs_device.parent = &port->sysfs_device;
- unit->sysfs_device.release = zfcp_unit_release;
+ unit->dev.parent = &port->dev;
+ unit->dev.release = zfcp_unit_release;
- if (dev_set_name(&unit->sysfs_device, "0x%016llx",
+ if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
goto err_out;
@@ -353,13 +323,12 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
unit->latencies.cmd.channel.min = 0xFFFFFFFF;
unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
- if (device_register(&unit->sysfs_device)) {
- put_device(&unit->sysfs_device);
+ if (device_register(&unit->dev)) {
+ put_device(&unit->dev);
goto err_out;
}
- if (sysfs_create_group(&unit->sysfs_device.kobj,
- &zfcp_sysfs_unit_attrs))
+ if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
goto err_out_put;
write_lock_irq(&port->unit_list_lock);
@@ -371,9 +340,9 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
return unit;
err_out_put:
- device_unregister(&unit->sysfs_device);
+ device_unregister(&unit->dev);
err_out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
return ERR_PTR(retval);
}
@@ -539,7 +508,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
if (zfcp_allocate_low_mem_buffers(adapter))
goto failed;
- if (zfcp_reqlist_alloc(adapter))
+ adapter->req_list = zfcp_reqlist_alloc();
+ if (!adapter->req_list)
goto failed;
if (zfcp_dbf_adapter_register(adapter))
@@ -560,8 +530,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_LIST_HEAD(&adapter->erp_ready_head);
INIT_LIST_HEAD(&adapter->erp_running_head);
- spin_lock_init(&adapter->req_list_lock);
-
rwlock_init(&adapter->erp_lock);
rwlock_init(&adapter->abort_lock);
@@ -640,8 +608,7 @@ void zfcp_device_unregister(struct device *dev,
static void zfcp_port_release(struct device *dev)
{
- struct zfcp_port *port = container_of(dev, struct zfcp_port,
- sysfs_device);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
zfcp_ccw_adapter_put(port->adapter);
kfree(port);
@@ -669,7 +636,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port = zfcp_get_port_by_wwpn(adapter, wwpn);
if (port) {
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
retval = -EEXIST;
goto err_out;
}
@@ -689,22 +656,21 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->d_id = d_id;
port->wwpn = wwpn;
port->rport_task = RPORT_NONE;
- port->sysfs_device.parent = &adapter->ccw_device->dev;
- port->sysfs_device.release = zfcp_port_release;
+ port->dev.parent = &adapter->ccw_device->dev;
+ port->dev.release = zfcp_port_release;
- if (dev_set_name(&port->sysfs_device, "0x%016llx",
- (unsigned long long)wwpn)) {
+ if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
}
retval = -EINVAL;
- if (device_register(&port->sysfs_device)) {
- put_device(&port->sysfs_device);
+ if (device_register(&port->dev)) {
+ put_device(&port->dev);
goto err_out;
}
- if (sysfs_create_group(&port->sysfs_device.kobj,
+ if (sysfs_create_group(&port->dev.kobj,
&zfcp_sysfs_port_attrs))
goto err_out_put;
@@ -717,7 +683,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
err_out_put:
- device_unregister(&port->sysfs_device);
+ device_unregister(&port->dev);
err_out:
zfcp_ccw_adapter_put(adapter);
return ERR_PTR(retval);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index c22cb72a5ae8..ce1cc7a11fb4 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,13 +3,14 @@
*
* Registration and callback for the s390 common I/O layer.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
#define ZFCP_MODEL_PRIV 0x4
@@ -122,12 +123,10 @@ static void zfcp_ccw_remove(struct ccw_device *cdev)
zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
- zfcp_device_unregister(&unit->sysfs_device,
- &zfcp_sysfs_unit_attrs);
+ zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
list_for_each_entry_safe(port, p, &port_remove_lh, list)
- zfcp_device_unregister(&port->sysfs_device,
- &zfcp_sysfs_port_attrs);
+ zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
zfcp_adapter_unregister(adapter);
}
@@ -162,7 +161,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
}
/* initialize request counter */
- BUG_ON(!zfcp_reqlist_isempty(adapter));
+ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;
zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 7369c8911bcf..7a149fd85f6d 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -140,9 +140,9 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
memcpy(response->fsf_status_qual,
fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
response->fsf_req_status = fsf_req->status;
- response->sbal_first = fsf_req->queue_req.sbal_first;
- response->sbal_last = fsf_req->queue_req.sbal_last;
- response->sbal_response = fsf_req->queue_req.sbal_response;
+ response->sbal_first = fsf_req->qdio_req.sbal_first;
+ response->sbal_last = fsf_req->qdio_req.sbal_last;
+ response->sbal_response = fsf_req->qdio_req.sbal_response;
response->pool = fsf_req->pool != NULL;
response->erp_action = (unsigned long)fsf_req->erp_action;
@@ -576,7 +576,8 @@ void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
struct zfcp_adapter *adapter = dbf->adapter;
zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
- &adapter->erp_counter, 0, 0, 0);
+ &adapter->erp_counter, 0, 0,
+ ZFCP_DBF_INVALID_LUN);
}
/**
@@ -590,8 +591,8 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
struct zfcp_dbf *dbf = port->adapter->dbf;
zfcp_dbf_rec_target(id, ref, dbf, &port->status,
- &port->erp_counter, port->wwpn, port->d_id,
- 0);
+ &port->erp_counter, port->wwpn, port->d_id,
+ ZFCP_DBF_INVALID_LUN);
}
/**
@@ -642,10 +643,9 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
r->u.trigger.ps = atomic_read(&port->status);
r->u.trigger.wwpn = port->wwpn;
}
- if (unit) {
+ if (unit)
r->u.trigger.us = atomic_read(&unit->status);
- r->u.trigger.fcp_lun = unit->fcp_lun;
- }
+ r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
@@ -668,7 +668,7 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
r->u.action.action = (unsigned long)erp_action;
r->u.action.status = erp_action->status;
r->u.action.step = erp_action->step;
- r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
+ r->u.action.fsf_req = erp_action->fsf_req_id;
debug_event(dbf->rec, 5, r, sizeof(*r));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 8b7fd9a1033e..457e046f2d28 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -30,6 +30,8 @@
#define ZFCP_DBF_TAG_SIZE 4
#define ZFCP_DBF_ID_SIZE 7
+#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
+
struct zfcp_dbf_dump {
u8 tag[ZFCP_DBF_TAG_SIZE];
u32 total_size; /* size of total dump data */
@@ -192,10 +194,10 @@ struct zfcp_dbf_san_record {
struct zfcp_dbf_san_record_ct_response ct_resp;
struct zfcp_dbf_san_record_els els;
} u;
-#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
- u8 payload[32];
} __attribute__ ((packed));
+#define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
+
struct zfcp_dbf_scsi_record {
u8 tag[ZFCP_DBF_TAG_SIZE];
u8 tag2[ZFCP_DBF_TAG_SIZE];
@@ -301,17 +303,31 @@ void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
/**
* zfcp_dbf_scsi_result - trace event for SCSI command completion
- * @tag: tag indicating success or failure of SCSI command
- * @level: trace level applicable for this event
- * @adapter: adapter that has been used to issue the SCSI command
+ * @dbf: adapter dbf trace
+ * @scmd: SCSI command pointer
+ * @req: FSF request used to issue SCSI command
+ */
+static inline
+void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
+ struct zfcp_fsf_req *req)
+{
+ if (scmd->result != 0)
+ zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0);
+ else if (scmd->retries > 0)
+ zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0);
+ else
+ zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0);
+}
+
+/**
+ * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command
+ * @dbf: adapter dbf trace
* @scmd: SCSI command pointer
- * @fsf_req: request used to issue SCSI command (might be NULL)
*/
static inline
-void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
- struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd)
{
- zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
+ zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0);
}
/**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index e1b5b88e2ddb..7131c7db1f04 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#ifndef ZFCP_DEF_H
@@ -33,15 +33,13 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <asm/ccwdev.h>
-#include <asm/qdio.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "zfcp_fsf.h"
+#include "zfcp_qdio.h"
-/********************* GENERAL DEFINES *********************************/
-
-#define REQUEST_LIST_SIZE 128
+struct zfcp_reqlist;
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
@@ -129,12 +127,6 @@ struct zfcp_adapter_mempool {
mempool_t *qtcb_pool;
};
-struct zfcp_qdio_queue {
- struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
- u8 first; /* index of next free bfr in queue */
- atomic_t count; /* number of free buffers in queue */
-};
-
struct zfcp_erp_action {
struct list_head list;
int action; /* requested action code */
@@ -143,8 +135,7 @@ struct zfcp_erp_action {
struct zfcp_unit *unit;
u32 status; /* recovery status */
u32 step; /* active step of this erp action */
- struct zfcp_fsf_req *fsf_req; /* fsf request currently pending
- for this action */
+ unsigned long fsf_req_id;
struct timer_list timer;
};
@@ -167,29 +158,6 @@ struct zfcp_latencies {
spinlock_t lock;
};
-/** struct zfcp_qdio - basic QDIO data structure
- * @resp_q: response queue
- * @req_q: request queue
- * @stat_lock: lock to protect req_q_util and req_q_time
- * @req_q_lock; lock to serialize access to request queue
- * @req_q_time: time of last fill level change
- * @req_q_util: used for accounting
- * @req_q_full: queue full incidents
- * @req_q_wq: used to wait for SBAL availability
- * @adapter: adapter used in conjunction with this QDIO structure
- */
-struct zfcp_qdio {
- struct zfcp_qdio_queue resp_q;
- struct zfcp_qdio_queue req_q;
- spinlock_t stat_lock;
- spinlock_t req_q_lock;
- unsigned long long req_q_time;
- u64 req_q_util;
- atomic_t req_q_full;
- wait_queue_head_t req_q_wq;
- struct zfcp_adapter *adapter;
-};
-
struct zfcp_adapter {
struct kref ref;
u64 peer_wwnn; /* P2P peer WWNN */
@@ -207,8 +175,7 @@ struct zfcp_adapter {
struct list_head port_list; /* remote port list */
rwlock_t port_list_lock; /* port list lock */
unsigned long req_no; /* unique FSF req number */
- struct list_head *req_list; /* list of pending reqs */
- spinlock_t req_list_lock; /* request list lock */
+ struct zfcp_reqlist *req_list;
u32 fsf_req_seq_no; /* FSF cmnd seq number */
rwlock_t abort_lock; /* Protects against SCSI
stack abort/command
@@ -241,7 +208,7 @@ struct zfcp_adapter {
};
struct zfcp_port {
- struct device sysfs_device; /* sysfs device */
+ struct device dev;
struct fc_rport *rport; /* rport of fc transport class */
struct list_head list; /* list of remote ports */
struct zfcp_adapter *adapter; /* adapter used to access port */
@@ -263,7 +230,7 @@ struct zfcp_port {
};
struct zfcp_unit {
- struct device sysfs_device; /* sysfs device */
+ struct device dev;
struct list_head list; /* list of logical units */
struct zfcp_port *port; /* remote port of unit */
atomic_t status; /* status of this logical unit */
@@ -277,33 +244,11 @@ struct zfcp_unit {
};
/**
- * struct zfcp_queue_req - queue related values for a request
- * @sbal_number: number of free SBALs
- * @sbal_first: first SBAL for this request
- * @sbal_last: last SBAL for this request
- * @sbal_limit: last possible SBAL for this request
- * @sbale_curr: current SBALE at creation of this request
- * @sbal_response: SBAL used in interrupt
- * @qdio_outb_usage: usage of outbound queue
- * @qdio_inb_usage: usage of inbound queue
- */
-struct zfcp_queue_req {
- u8 sbal_number;
- u8 sbal_first;
- u8 sbal_last;
- u8 sbal_limit;
- u8 sbale_curr;
- u8 sbal_response;
- u16 qdio_outb_usage;
- u16 qdio_inb_usage;
-};
-
-/**
* struct zfcp_fsf_req - basic FSF request structure
* @list: list of FSF requests
* @req_id: unique request ID
* @adapter: adapter this request belongs to
- * @queue_req: queue related values
+ * @qdio_req: qdio queue related values
* @completion: used to signal the completion of the request
* @status: status of the request
* @fsf_command: FSF command issued
@@ -321,7 +266,7 @@ struct zfcp_fsf_req {
struct list_head list;
unsigned long req_id;
struct zfcp_adapter *adapter;
- struct zfcp_queue_req queue_req;
+ struct zfcp_qdio_req qdio_req;
struct completion completion;
u32 status;
u32 fsf_command;
@@ -352,45 +297,4 @@ struct zfcp_data {
#define ZFCP_SET 0x00000100
#define ZFCP_CLEAR 0x00000200
-/*
- * Helper functions for request ID management.
- */
-static inline int zfcp_reqlist_hash(unsigned long req_id)
-{
- return req_id % REQUEST_LIST_SIZE;
-}
-
-static inline void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
- struct zfcp_fsf_req *fsf_req)
-{
- list_del(&fsf_req->list);
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find(struct zfcp_adapter *adapter, unsigned long req_id)
-{
- struct zfcp_fsf_req *request;
- unsigned int idx;
-
- idx = zfcp_reqlist_hash(req_id);
- list_for_each_entry(request, &adapter->req_list[idx], list)
- if (request->req_id == req_id)
- return request;
- return NULL;
-}
-
-static inline struct zfcp_fsf_req *
-zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
-{
- struct zfcp_fsf_req *request;
- unsigned int idx;
-
- for (idx = 0; idx < REQUEST_LIST_SIZE; idx++) {
- list_for_each_entry(request, &adapter->req_list[idx], list)
- if (request == req)
- return request;
- }
- return NULL;
-}
-
#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index b51a11a82e63..0be5e7ea2828 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
#include <linux/kthread.h>
#include "zfcp_ext.h"
+#include "zfcp_reqlist.h"
#define ZFCP_MAX_ERPS 3
@@ -174,7 +175,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
- if (!get_device(&unit->sysfs_device))
+ if (!get_device(&unit->dev))
return NULL;
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
erp_action = &unit->erp_action;
@@ -184,7 +185,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
case ZFCP_ERP_ACTION_REOPEN_PORT:
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- if (!get_device(&port->sysfs_device))
+ if (!get_device(&port->dev))
return NULL;
zfcp_erp_action_dismiss_port(port);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
@@ -478,26 +479,27 @@ static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
{
struct zfcp_adapter *adapter = act->adapter;
+ struct zfcp_fsf_req *req;
- if (!act->fsf_req)
+ if (!act->fsf_req_id)
return;
- spin_lock(&adapter->req_list_lock);
- if (zfcp_reqlist_find_safe(adapter, act->fsf_req) &&
- act->fsf_req->erp_action == act) {
+ spin_lock(&adapter->req_list->lock);
+ req = _zfcp_reqlist_find(adapter->req_list, act->fsf_req_id);
+ if (req && req->erp_action == act) {
if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
- act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+ req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_action("erscf_1", act);
- act->fsf_req->erp_action = NULL;
+ req->erp_action = NULL;
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_action("erscf_2", act);
- if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
- act->fsf_req = NULL;
+ if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+ act->fsf_req_id = 0;
} else
- act->fsf_req = NULL;
- spin_unlock(&adapter->req_list_lock);
+ act->fsf_req_id = 0;
+ spin_unlock(&adapter->req_list->lock);
}
/**
@@ -1179,19 +1181,19 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
switch (act->action) {
case ZFCP_ERP_ACTION_REOPEN_UNIT:
if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
- get_device(&unit->sysfs_device);
+ get_device(&unit->dev);
if (scsi_queue_work(unit->port->adapter->scsi_host,
&unit->scsi_work) <= 0)
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT:
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_scsi_schedule_rport_register(port);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 66bdb34143cb..8786a79c7f8f 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -21,7 +21,6 @@ extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
u32);
extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
-extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
extern void zfcp_sg_free_table(struct scatterlist *, int);
extern int zfcp_sg_setup_table(struct scatterlist *, int);
extern void zfcp_device_unregister(struct device *,
@@ -144,13 +143,9 @@ extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
/* zfcp_qdio.c */
extern int zfcp_qdio_setup(struct zfcp_adapter *);
extern void zfcp_qdio_destroy(struct zfcp_qdio *);
-extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
- *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
-extern struct qdio_buffer_element
- *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
- struct zfcp_queue_req *, unsigned long,
+ struct zfcp_qdio_req *, unsigned long,
struct scatterlist *, int);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 271399f62f1b..5219670f0c99 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -316,7 +316,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -325,9 +325,9 @@ out:
*/
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -389,7 +389,7 @@ static void zfcp_fc_adisc_handler(void *data)
zfcp_scsi_schedule_rport_register(port);
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
kmem_cache_free(zfcp_data.adisc_cache, adisc);
}
@@ -436,7 +436,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
container_of(work, struct zfcp_port, test_link_work);
int retval;
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
@@ -455,7 +455,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -468,9 +468,9 @@ out:
*/
void zfcp_fc_test_link(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
if (!queue_work(port->adapter->work_queue, &port->test_link_work))
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
@@ -617,8 +617,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
list_for_each_entry_safe(port, tmp, &remove_lh, list) {
zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
- zfcp_device_unregister(&port->sysfs_device,
- &zfcp_sysfs_port_attrs);
+ zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
}
return ret;
@@ -731,7 +730,7 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
return -EINVAL;
d_id = port->d_id;
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
} else
d_id = ntoh24(job->request->rqst_data.h_els.port_id);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e8fb4d9baa8b..6538742b421a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,8 @@
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
+#include "zfcp_qdio.h"
+#include "zfcp_reqlist.h"
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
@@ -393,7 +395,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
case FSF_PROT_LINK_DOWN:
zfcp_fsf_link_down_info_eval(req, "fspse_5",
&psq->link_down_info);
- /* FIXME: reopening adapter now? better wait for link up */
+ /* go through reopen to flush pending requests */
zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
break;
case FSF_PROT_REEST_QUEUE:
@@ -457,15 +459,10 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
struct zfcp_fsf_req *req, *tmp;
- unsigned long flags;
LIST_HEAD(remove_queue);
- unsigned int i;
BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- for (i = 0; i < REQUEST_LIST_SIZE; i++)
- list_splice_init(&adapter->req_list[i], &remove_queue);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ zfcp_reqlist_move(adapter->req_list, &remove_queue);
list_for_each_entry_safe(req, tmp, &remove_queue, list) {
list_del(&req->list);
@@ -495,8 +492,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_id(shost) = ntoh24(bottom->s_id);
fc_host_speed(shost) = bottom->fc_link_speed;
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
- fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
- fc_host_active_fc4s(shost)[2] = 1; /* FCP */
adapter->hydra_version = bottom->adapter_type;
adapter->timer_ticks = bottom->timer_interval;
@@ -619,6 +614,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
fc_host_supported_speeds(shost) = bottom->supported_speed;
+ memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+ FC_FC4_LIST_SIZE);
+ memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+ FC_FC4_LIST_SIZE);
}
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
@@ -725,12 +724,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->adapter = adapter;
req->fsf_command = fsf_cmd;
req->req_id = adapter->req_no;
- req->queue_req.sbal_number = 1;
- req->queue_req.sbal_first = req_q->first;
- req->queue_req.sbal_last = req_q->first;
- req->queue_req.sbale_curr = 1;
+ req->qdio_req.sbal_number = 1;
+ req->qdio_req.sbal_first = req_q->first;
+ req->qdio_req.sbal_last = req_q->first;
+ req->qdio_req.sbale_curr = 1;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].addr = (void *) req->req_id;
sbale[0].flags |= SBAL_FLAGS0_COMMAND;
@@ -745,6 +744,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
return ERR_PTR(-ENOMEM);
}
+ req->seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
req->qtcb->prefix.req_id = req->req_id;
req->qtcb->prefix.ulp_info = 26;
@@ -752,8 +752,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
req->qtcb->header.req_handle = req->req_id;
req->qtcb->header.fsf_command = req->fsf_command;
- req->seq_no = adapter->fsf_req_seq_no;
- req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
sbale[1].addr = (void *) req->qtcb;
sbale[1].length = sizeof(struct fsf_qtcb);
}
@@ -770,25 +768,17 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
- unsigned long flags;
- int idx;
- int with_qtcb = (req->qtcb != NULL);
+ int with_qtcb = (req->qtcb != NULL);
+ int req_id = req->req_id;
- /* put allocated FSF request into hash table */
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- idx = zfcp_reqlist_hash(req->req_id);
- list_add_tail(&req->list, &adapter->req_list[idx]);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ zfcp_reqlist_add(adapter->req_list, req);
- req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
+ req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
req->issued = get_clock();
- if (zfcp_qdio_send(qdio, &req->queue_req)) {
+ if (zfcp_qdio_send(qdio, &req->qdio_req)) {
del_timer(&req->timer);
- spin_lock_irqsave(&adapter->req_list_lock, flags);
/* lookup request again, list might have changed */
- if (zfcp_reqlist_find_safe(adapter, req))
- zfcp_reqlist_remove(adapter, req);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ zfcp_reqlist_find_rm(adapter->req_list, req_id);
zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
return -EIO;
}
@@ -826,9 +816,9 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
- req->queue_req.sbale_curr = 2;
+ req->qdio_req.sbale_curr = 2;
sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
if (!sr_buf) {
@@ -837,7 +827,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
}
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
- sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_curr(qdio, &req->qdio_req);
sbale->addr = (void *) sr_buf;
sbale->length = sizeof(*sr_buf);
@@ -934,7 +924,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
ZFCP_STATUS_COMMON_UNBLOCKED)))
goto out_error_free;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1029,7 +1019,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
{
struct zfcp_adapter *adapter = req->adapter;
struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
- &req->queue_req);
+ &req->qdio_req);
u32 feat = adapter->adapter_features;
int bytes;
@@ -1047,15 +1037,15 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
return 0;
}
- bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+ bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
sg_req, max_sbals);
if (bytes <= 0)
return -EIO;
req->qtcb->bottom.support.req_buf_length = bytes;
- req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+ req->qdio_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
- bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+ bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
SBAL_FLAGS0_TYPE_WRITE_READ,
sg_resp, max_sbals);
req->qtcb->bottom.support.resp_buf_length = bytes;
@@ -1251,7 +1241,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1262,13 +1252,13 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
FSF_FEATURE_UPDATE_ALERT;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1293,7 +1283,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
goto out_unlock;
}
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1349,19 +1339,19 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
req->handler = zfcp_fsf_exchange_port_data_handler;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1398,7 +1388,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (data)
req->data = data;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1484,7 +1474,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
}
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
/**
@@ -1513,7 +1503,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1521,15 +1511,15 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
hton24(req->qtcb->bottom.support.d_id, port->d_id);
req->data = port;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
- get_device(&port->sysfs_device);
+ erp_action->fsf_req_id = req->req_id;
+ get_device(&port->dev);
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
- put_device(&port->sysfs_device);
+ erp_action->fsf_req_id = 0;
+ put_device(&port->dev);
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1583,7 +1573,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1591,13 +1581,13 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
req->data = erp_action->port;
req->erp_action = erp_action;
req->qtcb->header.port_handle = erp_action->port->handle;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1660,7 +1650,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1715,7 +1705,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1809,7 +1799,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1817,13 +1807,13 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
req->qtcb->header.port_handle = erp_action->port->handle;
req->erp_action = erp_action;
req->handler = zfcp_fsf_close_physical_port_handler;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -1982,7 +1972,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -1991,7 +1981,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
req->handler = zfcp_fsf_open_unit_handler;
req->data = erp_action->unit;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
@@ -2000,7 +1990,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -2068,7 +2058,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2077,13 +2067,13 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
req->handler = zfcp_fsf_close_unit_handler;
req->data = erp_action->unit;
req->erp_action = erp_action;
- erp_action->fsf_req = req;
+ erp_action->fsf_req_id = req->req_id;
zfcp_fsf_start_erp_timer(req);
retval = zfcp_fsf_req_send(req);
if (retval) {
zfcp_fsf_req_free(req);
- erp_action->fsf_req = NULL;
+ erp_action->fsf_req_id = 0;
}
out:
spin_unlock_bh(&qdio->req_q_lock);
@@ -2111,8 +2101,8 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
blktrc.flags |= ZFCP_BLK_REQ_ERROR;
- blktrc.inb_usage = req->queue_req.qdio_inb_usage;
- blktrc.outb_usage = req->queue_req.qdio_outb_usage;
+ blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
+ blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
blktrc.flags |= ZFCP_BLK_LAT_VALID;
@@ -2169,12 +2159,7 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
zfcp_fsf_req_trace(req, scpnt);
skip_fsfstatus:
- if (scpnt->result != 0)
- zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
- else if (scpnt->retries > 0)
- zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
- else
- zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
+ zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
scpnt->host_scribble = NULL;
(scpnt->scsi_done) (scpnt);
@@ -2274,7 +2259,7 @@ skip_fsfstatus:
else {
zfcp_fsf_send_fcp_command_task_handler(req);
req->unit = NULL;
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
}
@@ -2312,7 +2297,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
}
req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
- get_device(&unit->sysfs_device);
+ get_device(&unit->dev);
req->unit = unit;
req->data = scsi_cmnd;
req->handler = zfcp_fsf_send_fcp_command_handler;
@@ -2346,11 +2331,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
- real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
+ real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sbtype,
scsi_sglist(scsi_cmnd),
FSF_MAX_SBALS_PER_REQ);
if (unlikely(real_bytes < 0)) {
- if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+ if (req->qdio_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
dev_err(&adapter->ccw_device->dev,
"Oversize data package, unit 0x%016Lx "
"on port 0x%016Lx closed\n",
@@ -2369,7 +2354,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
goto out;
failed_scsi_cmnd:
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
zfcp_fsf_req_free(req);
scsi_cmnd->host_scribble = NULL;
out:
@@ -2415,7 +2400,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
req->qtcb->bottom.io.service_class = FSF_CLASS_3;
req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
@@ -2478,14 +2463,14 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
req->handler = zfcp_fsf_control_file_handler;
- sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
+ sbale = zfcp_qdio_sbale_req(qdio, &req->qdio_req);
sbale[0].flags |= direction;
bottom = &req->qtcb->bottom.support;
bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
bottom->option = fsf_cfdc->option;
- bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
+ bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
direction, fsf_cfdc->sg,
FSF_MAX_SBALS_PER_REQ);
if (bytes != ZFCP_CFDC_MAX_SIZE) {
@@ -2516,15 +2501,14 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req;
- unsigned long flags, req_id;
+ unsigned long req_id;
int idx;
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
req_id = (unsigned long) sbale->addr;
- spin_lock_irqsave(&adapter->req_list_lock, flags);
- fsf_req = zfcp_reqlist_find(adapter, req_id);
+ fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req)
/*
@@ -2534,11 +2518,8 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
panic("error: unknown req_id (%lx) on adapter %s.\n",
req_id, dev_name(&adapter->ccw_device->dev));
- list_del(&fsf_req->list);
- spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
- fsf_req->queue_req.sbal_response = sbal_idx;
- fsf_req->queue_req.qdio_inb_usage =
+ fsf_req->qdio_req.sbal_response = sbal_idx;
+ fsf_req->qdio_req.qdio_inb_usage =
atomic_read(&qdio->resp_q.count);
zfcp_fsf_req_complete(fsf_req);
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 6c5228b627fc..71b97ff77cf0 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include "zfcp_ext.h"
+#include "zfcp_qdio.h"
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
@@ -28,12 +29,6 @@ static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
return 0;
}
-static struct qdio_buffer_element *
-zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
-{
- return &q->sbal[sbal_idx]->element[sbale_idx];
-}
-
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
{
struct zfcp_adapter *adapter = qdio->adapter;
@@ -106,7 +101,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
if (unlikely(retval)) {
atomic_set(&queue->count, count);
- /* FIXME: Recover this with an adapter reopen? */
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdrpb_1", NULL);
} else {
queue->first += count;
queue->first %= QDIO_MAX_BUFFERS_PER_Q;
@@ -145,32 +140,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
zfcp_qdio_resp_put_back(qdio, count);
}
-/**
- * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
- * @qdio: pointer to struct zfcp_qdio
- * @q_rec: pointer to struct zfcp_queue_rec
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req)
-{
- return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
-}
-
-/**
- * zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
- * @fsf_req: pointer to struct fsf_req
- * Returns: pointer to qdio_buffer_element (SBALE) structure
- */
-struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req)
-{
- return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
- q_req->sbale_curr);
-}
-
static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req, int max_sbals)
+ struct zfcp_qdio_req *q_req, int max_sbals)
{
int count = atomic_read(&qdio->req_q.count);
count = min(count, max_sbals);
@@ -179,7 +150,7 @@ static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long sbtype)
{
struct qdio_buffer_element *sbale;
@@ -214,7 +185,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
}
static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned int sbtype)
{
if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -224,7 +195,7 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
}
static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req)
+ struct zfcp_qdio_req *q_req)
{
struct qdio_buffer **sbal = qdio->req_q.sbal;
int first = q_req->sbal_first;
@@ -235,7 +206,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
}
static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req,
+ struct zfcp_qdio_req *q_req,
unsigned int sbtype, void *start_addr,
unsigned int total_length)
{
@@ -271,8 +242,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
* @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
*/
-int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
- struct zfcp_queue_req *q_req,
+int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
unsigned long sbtype, struct scatterlist *sg,
int max_sbals)
{
@@ -304,10 +274,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
/**
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @qdio: pointer to struct zfcp_qdio
- * @q_req: pointer to struct zfcp_queue_req
+ * @q_req: pointer to struct zfcp_qdio_req
* Returns: 0 on success, error otherwise
*/
-int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req)
+int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
struct zfcp_qdio_queue *req_q = &qdio->req_q;
int first = q_req->sbal_first;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
new file mode 100644
index 000000000000..8cca54631e1e
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -0,0 +1,109 @@
+/*
+ * zfcp device driver
+ *
+ * Header file for zfcp qdio interface
+ *
+ * Copyright IBM Corporation 2010
+ */
+
+#ifndef ZFCP_QDIO_H
+#define ZFCP_QDIO_H
+
+#include <asm/qdio.h>
+
+/**
+ * struct zfcp_qdio_queue - qdio queue buffer, zfcp index and free count
+ * @sbal: qdio buffers
+ * @first: index of next free buffer in queue
+ * @count: number of free buffers in queue
+ */
+struct zfcp_qdio_queue {
+ struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+ u8 first;
+ atomic_t count;
+};
+
+/**
+ * struct zfcp_qdio - basic qdio data structure
+ * @resp_q: response queue
+ * @req_q: request queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock: lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this qdio structure
+ */
+struct zfcp_qdio {
+ struct zfcp_qdio_queue resp_q;
+ struct zfcp_qdio_queue req_q;
+ spinlock_t stat_lock;
+ spinlock_t req_q_lock;
+ unsigned long long req_q_time;
+ u64 req_q_util;
+ atomic_t req_q_full;
+ wait_queue_head_t req_q_wq;
+ struct zfcp_adapter *adapter;
+};
+
+/**
+ * struct zfcp_qdio_req - qdio queue related values for a request
+ * @sbal_number: number of sbals used by this request
+ * @sbal_first: first sbal for this request
+ * @sbal_last: last sbal for this request
+ * @sbal_limit: last possible sbal for this request
+ * @sbale_curr: current sbale at creation of this request
+ * @sbal_response: sbal used in interrupt
+ * @qdio_outb_usage: usage of outbound queue
+ * @qdio_inb_usage: usage of inbound queue
+ */
+struct zfcp_qdio_req {
+ u8 sbal_number;
+ u8 sbal_first;
+ u8 sbal_last;
+ u8 sbal_limit;
+ u8 sbale_curr;
+ u8 sbal_response;
+ u16 qdio_outb_usage;
+ u16 qdio_inb_usage;
+};
+
+/**
+ * zfcp_qdio_sbale - return pointer to sbale in qdio queue
+ * @q: queue where to find sbal
+ * @sbal_idx: sbal index in queue
+ * @sbale_idx: sbale index in sbal
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx)
+{
+ return &q->sbal[sbal_idx]->element[sbale_idx];
+}
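
For illustration only, and not part of the patch: a minimal standalone C sketch of the two-level SBAL/SBALE indexing that zfcp_qdio_sbale() performs. The constants mirror asm/qdio.h (128 SBALs per queue, 16 SBALEs per SBAL); flat_index() is a made-up stand-in for the pointer arithmetic.

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q       128  /* SBALs per queue */
#define QDIO_MAX_ELEMENTS_PER_BUFFER  16  /* SBALEs per SBAL */

/* Flattened stand-in for q->sbal[sbal_idx]->element[sbale_idx]:
 * the queue is an array of SBALs, each SBAL an array of SBALEs. */
static int flat_index(int sbal_idx, int sbale_idx)
{
	return sbal_idx * QDIO_MAX_ELEMENTS_PER_BUFFER + sbale_idx;
}

int main(void)
{
	/* e.g. SBALE 3 of SBAL 5 */
	printf("flat index = %d\n", flat_index(5, 3));
	return 0;
}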
+
+/**
+ * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
+}
+
+/**
+ * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_qdio_req
+ * Returns: pointer to qdio_buffer_element (sbale) structure
+ */
+static inline struct qdio_buffer_element *
+zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
+{
+ return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
+ q_req->sbale_curr);
+}
+
+#endif /* ZFCP_QDIO_H */
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
new file mode 100644
index 000000000000..a72d1b730aba
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -0,0 +1,183 @@
+/*
+ * zfcp device driver
+ *
+ * Data structure and helper functions for tracking pending FSF
+ * requests.
+ *
+ * Copyright IBM Corporation 2009
+ */
+
+#ifndef ZFCP_REQLIST_H
+#define ZFCP_REQLIST_H
+
+/* number of hash buckets */
+#define ZFCP_REQ_LIST_BUCKETS 128
+
+/**
+ * struct zfcp_reqlist - Container for request list (reqlist)
+ * @lock: Spinlock for protecting the hash list
+ * @buckets: Array of hash buckets; each entry is the list of requests hashing to that bucket
+ */
+struct zfcp_reqlist {
+ spinlock_t lock;
+ struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
+};
+
+static inline int zfcp_reqlist_hash(unsigned long req_id)
+{
+ return req_id % ZFCP_REQ_LIST_BUCKETS;
+}
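
As a standalone illustration (not part of the patch), the bucket selection can be exercised in user space. Because request ids increase monotonically, consecutive ids land in consecutive buckets and wrap back to bucket 0 after 127:

#include <stdio.h>

#define ZFCP_REQ_LIST_BUCKETS 128

/* Same computation as zfcp_reqlist_hash() above. */
static int reqlist_hash(unsigned long req_id)
{
	return req_id % ZFCP_REQ_LIST_BUCKETS;
}

int main(void)
{
	unsigned long id;

	/* ids 126..129 map to buckets 126, 127, 0, 1 */
	for (id = 126; id < 130; id++)
		printf("req_id %lu -> bucket %d\n", id, reqlist_hash(id));
	return 0;
}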
+
+/**
+ * zfcp_reqlist_alloc - Allocate and initialize reqlist
+ *
+ * Returns pointer to allocated reqlist on success, or NULL on
+ * allocation failure.
+ */
+static inline struct zfcp_reqlist *zfcp_reqlist_alloc(void)
+{
+ unsigned int i;
+ struct zfcp_reqlist *rl;
+
+ rl = kzalloc(sizeof(struct zfcp_reqlist), GFP_KERNEL);
+ if (!rl)
+ return NULL;
+
+ spin_lock_init(&rl->lock);
+
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ INIT_LIST_HEAD(&rl->buckets[i]);
+
+ return rl;
+}
+
+/**
+ * zfcp_reqlist_isempty - Check whether the request list is empty
+ * @rl: pointer to reqlist
+ *
+ * Returns: 1 if list is empty, 0 if not
+ */
+static inline int zfcp_reqlist_isempty(struct zfcp_reqlist *rl)
+{
+ unsigned int i;
+
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ if (!list_empty(&rl->buckets[i]))
+ return 0;
+ return 1;
+}
+
+/**
+ * zfcp_reqlist_free - Free allocated memory for reqlist
+ * @rl: The reqlist to free
+ */
+static inline void zfcp_reqlist_free(struct zfcp_reqlist *rl)
+{
+ /* sanity check */
+ BUG_ON(!zfcp_reqlist_isempty(rl));
+
+ kfree(rl);
+}
+
+static inline struct zfcp_fsf_req *
+_zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ struct zfcp_fsf_req *req;
+ unsigned int i;
+
+ i = zfcp_reqlist_hash(req_id);
+ list_for_each_entry(req, &rl->buckets[i], list)
+ if (req->req_id == req_id)
+ return req;
+ return NULL;
+}
+
+/**
+ * zfcp_reqlist_find - Lookup FSF request by its request id
+ * @rl: The reqlist in which to look up the FSF request
+ * @req_id: The request id to look for
+ *
+ * Returns a pointer to the FSF request with the specified request id
+ * or NULL if there is no known FSF request with this id.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ unsigned long flags;
+ struct zfcp_fsf_req *req;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ req = _zfcp_reqlist_find(rl, req_id);
+ spin_unlock_irqrestore(&rl->lock, flags);
+
+ return req;
+}
+
+/**
+ * zfcp_reqlist_find_rm - Lookup request by id and remove it from reqlist
+ * @rl: The reqlist to search and remove the entry from
+ * @req_id: The request id of the request to look for
+ *
+ * This function tries to find the FSF request with the specified
+ * id and then removes it from the reqlist. The reqlist lock is held
+ * during both steps of the operation.
+ *
+ * Returns: Pointer to the FSF request if the request has been found,
+ * NULL if it has not been found.
+ */
+static inline struct zfcp_fsf_req *
+zfcp_reqlist_find_rm(struct zfcp_reqlist *rl, unsigned long req_id)
+{
+ unsigned long flags;
+ struct zfcp_fsf_req *req;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ req = _zfcp_reqlist_find(rl, req_id);
+ if (req)
+ list_del(&req->list);
+ spin_unlock_irqrestore(&rl->lock, flags);
+
+ return req;
+}
+
+/**
+ * zfcp_reqlist_add - Add entry to reqlist
+ * @rl: The reqlist to add the entry to
+ * @req: The entry to add
+ *
+ * The request id always increases. As an optimization new requests
+ * are added here with list_add_tail at the end of the bucket lists
+ * while old requests are looked up starting at the beginning of the
+ * lists.
+ */
+static inline void zfcp_reqlist_add(struct zfcp_reqlist *rl,
+ struct zfcp_fsf_req *req)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ i = zfcp_reqlist_hash(req->req_id);
+
+ spin_lock_irqsave(&rl->lock, flags);
+ list_add_tail(&req->list, &rl->buckets[i]);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
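
A hedged usage sketch, mirroring the zfcp_fsf changes earlier in this patch: the submission path adds the request before handing it to the hardware, and the response path removes it in a single locked lookup. Kernel-style pseudocode, not a compilable unit:

/*
 * submission path (cf. zfcp_fsf_req_send):
 *
 *	... assign a monotonically increasing req_id ...
 *	zfcp_reqlist_add(adapter->req_list, req);
 *	... hand the request over to the hardware ...
 *
 * completion path (cf. zfcp_fsf_reqid_check above):
 *
 *	req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
 *	if (!req)
 *		... unknown request id, treated as fatal ...
 */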
+
+/**
+ * zfcp_reqlist_move - Move all entries from reqlist to simple list
+ * @rl: The zfcp_reqlist to remove all entries from
+ * @list: The list to move the entries to
+ */
+static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
+ struct list_head *list)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ list_splice_init(&rl->buckets[i], list);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
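
A hedged sketch of the intended consumer, e.g. dismissing all pending requests during adapter recovery; this assumes the usual list_for_each_entry_safe idiom and is not part of the patch:

/*
 *	LIST_HEAD(remove_queue);
 *	struct zfcp_fsf_req *req, *tmp;
 *
 *	zfcp_reqlist_move(adapter->req_list, &remove_queue);
 *	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
 *		list_del(&req->list);
 *		... mark req dismissed and complete it ...
 *	}
 */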
+
+#endif /* ZFCP_REQLIST_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 8e6fc68d6bd4..c3c4178888af 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corporation 2002, 2009
+ * Copyright IBM Corporation 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -15,6 +15,7 @@
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
+#include "zfcp_reqlist.h"
static unsigned int default_depth = 32;
module_param_named(queue_depth, default_depth, uint, 0600);
@@ -43,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
unit->device = NULL;
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -59,10 +60,9 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
+
set_host_byte(scpnt, result);
- if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
- zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
- /* return directly */
+ zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
scpnt->scsi_done(scpnt);
}
@@ -86,18 +86,10 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
unit = scpnt->device->hostdata;
- BUG_ON(!adapter || (adapter != unit->port->adapter));
- BUG_ON(!scpnt->scsi_done);
-
- if (unlikely(!unit)) {
- zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
- return 0;
- }
-
scsi_result = fc_remote_port_chkready(rport);
if (unlikely(scsi_result)) {
scpnt->result = scsi_result;
- zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
+ zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt);
scpnt->scsi_done(scpnt);
return 0;
}
@@ -189,9 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
/* avoid race condition between late normal completion and abort */
write_lock_irqsave(&adapter->abort_lock, flags);
- spin_lock(&adapter->req_list_lock);
- old_req = zfcp_reqlist_find(adapter, old_reqid);
- spin_unlock(&adapter->req_list_lock);
+ old_req = zfcp_reqlist_find(adapter->req_list, old_reqid);
if (!old_req) {
write_unlock_irqrestore(&adapter->abort_lock, flags);
zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
@@ -521,7 +511,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
if (port) {
zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
}
@@ -563,23 +553,23 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
port->rport_task = RPORT_ADD;
if (!queue_work(port->adapter->work_queue, &port->rport_work))
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
{
- get_device(&port->sysfs_device);
+ get_device(&port->dev);
port->rport_task = RPORT_DEL;
if (port->rport && queue_work(port->adapter->work_queue,
&port->rport_work))
return;
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@@ -608,7 +598,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
}
}
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
}
@@ -626,7 +616,7 @@ void zfcp_scsi_scan(struct work_struct *work)
scsilun_to_int((struct scsi_lun *)
&unit->fcp_lun), 0);
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
}
struct fc_function_template zfcp_transport_functions = {
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index f539e006683c..a43035d4bd70 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
*
* sysfs attributes.
*
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
@@ -19,8 +19,7 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
struct device_attribute *at,\
char *buf) \
{ \
- struct _feat_def *_feat = container_of(dev, struct _feat_def, \
- sysfs_device); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
return sprintf(buf, _format, _value); \
} \
@@ -87,8 +86,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct _feat_def *_feat = container_of(dev, struct _feat_def, \
- sysfs_device); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
\
if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
return sprintf(buf, "1\n"); \
@@ -99,12 +97,11 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
- struct _feat_def *_feat = container_of(dev, struct _feat_def, \
- sysfs_device); \
+ struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \
unsigned long val; \
int retval = 0; \
\
- if (!(_feat && get_device(&_feat->sysfs_device))) \
+ if (!(_feat && get_device(&_feat->dev))) \
return -EBUSY; \
\
if (strict_strtoul(buf, 0, &val) || val != 0) { \
@@ -118,7 +115,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
_reopen_id, NULL); \
zfcp_erp_wait(_adapter); \
out: \
- put_device(&_feat->sysfs_device); \
+ put_device(&_feat->dev); \
return retval ? retval : (ssize_t) count; \
} \
static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
@@ -224,10 +221,10 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
- zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs);
+ zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs);
out:
zfcp_ccw_adapter_put(adapter);
return retval ? retval : (ssize_t) count;
@@ -258,13 +255,12 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = container_of(dev, struct zfcp_port,
- sysfs_device);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
- if (!(port && get_device(&port->sysfs_device)))
+ if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -280,7 +276,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
zfcp_erp_wait(unit->port->adapter);
flush_work(&unit->scsi_work);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -289,13 +285,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zfcp_port *port = container_of(dev, struct zfcp_port,
- sysfs_device);
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
struct zfcp_unit *unit;
u64 fcp_lun;
int retval = -EINVAL;
- if (!(port && get_device(&port->sysfs_device)))
+ if (!(port && get_device(&port->dev)))
return -EBUSY;
if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
@@ -314,12 +309,12 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
list_del(&unit->list);
write_unlock_irq(&port->unit_list_lock);
- put_device(&unit->sysfs_device);
+ put_device(&unit->dev);
zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
- zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs);
+ zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
out:
- put_device(&port->sysfs_device);
+ put_device(&port->dev);
return retval ? retval : (ssize_t) count;
}
static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index b898d382b7b0..e40cdfb7541f 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -3924,7 +3924,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
{
struct sccb_mgr_tar_info *currTar_Info;
- if ((p_sccb->TargID > MAX_SCSI_TAR) || (p_sccb->Lun > MAX_LUN)) {
+ if ((p_sccb->TargID >= MAX_SCSI_TAR) || (p_sccb->Lun >= MAX_LUN)) {
return;
}
currTar_Info = &FPT_sccbMgrTbl[p_card][p_sccb->TargID];
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a93a5040f087..136b49cea791 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,6 +24,10 @@
#define FW_VER_LEN 32
#define MCC_Q_LEN 128
#define MCC_CQ_LEN 256
+#define MAX_MCC_CMD 16
+/* BladeEngine Generation numbers */
+#define BE_GEN2 2
+#define BE_GEN3 3
struct be_dma_mem {
void *va;
@@ -57,6 +61,11 @@ static inline void *queue_head_node(struct be_queue_info *q)
return q->dma_mem.va + q->head * q->entry_size;
}
+static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num)
+{
+ return q->dma_mem.va + wrb_num * q->entry_size;
+}
+
static inline void *queue_tail_node(struct be_queue_info *q)
{
return q->dma_mem.va + q->tail * q->entry_size;
@@ -104,15 +113,19 @@ struct be_ctrl_info {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- /* MCC Async callback */
- void (*async_cb) (void *adapter, bool link_up);
- void *adapter_ctxt;
+ wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
+ unsigned int mcc_tag[MAX_MCC_CMD];
+ unsigned int mcc_numtag[MAX_MCC_CMD + 1];
+ unsigned short mcc_alloc_index;
+ unsigned short mcc_free_index;
+ unsigned int mcc_tag_available;
};
#include "be_cmds.h"
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+#define mcc_timeout 120000 /* 5s timeout */
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index f008708f1b08..67098578fba4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -19,7 +19,7 @@
#include "be_mgmt.h"
#include "be_main.h"
-static void be_mcc_notify(struct beiscsi_hba *phba)
+void be_mcc_notify(struct beiscsi_hba *phba)
{
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
u32 val = 0;
@@ -29,6 +29,52 @@ static void be_mcc_notify(struct beiscsi_hba *phba)
iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
+unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
+{
+ unsigned int tag = 0;
+ unsigned int num = 0;
+
+mcc_tag_rdy:
+ if (phba->ctrl.mcc_tag_available) {
+ tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
+ phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
+ phba->ctrl.mcc_numtag[tag] = 0;
+ } else {
+ udelay(100);
+ num++;
+ if (num < mcc_timeout)
+ goto mcc_tag_rdy;
+ }
+ if (tag) {
+ phba->ctrl.mcc_tag_available--;
+ if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
+ phba->ctrl.mcc_alloc_index = 0;
+ else
+ phba->ctrl.mcc_alloc_index++;
+ }
+ return tag;
+}
+
+void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
+{
+ spin_lock(&ctrl->mbox_lock);
+ tag = tag & 0x000000FF;
+ ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
+ if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
+ ctrl->mcc_free_index = 0;
+ else
+ ctrl->mcc_free_index++;
+ ctrl->mcc_tag_available++;
+ spin_unlock(&ctrl->mbox_lock);
+}
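
Taken together with alloc_mcc_tag() above, the tag pool behaves as a ring of MAX_MCC_CMD tags. A hedged lifecycle sketch, mirroring beiscsi_get_host_param later in this patch:

/*
 *	tag = alloc_mcc_tag(phba);		// 0 means no tag available
 *	if (!tag)
 *		return -EBUSY;
 *	... build the WRB, then be_mcc_notify(phba) ...
 *	wait_event_interruptible(phba->ctrl.mcc_wait[tag],
 *				 phba->ctrl.mcc_numtag[tag]);
 *	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
 *	free_mcc_tag(&phba->ctrl, tag);		// return tag to the pool
 */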
+
+bool is_link_state_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE);
+}
+
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
if (compl->flags != 0) {
@@ -64,12 +110,30 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
return 0;
}
-
-static inline bool is_link_state_evt(u32 trailer)
+int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE);
+ u16 compl_status, extd_status;
+ unsigned short tag;
+
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ /* The ctrl.mcc_numtag[tag] is filled with
+ * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
+ * [7:0] = compl_status
+ */
+ tag = (compl->tag0 & 0x000000FF);
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+
+ ctrl->mcc_numtag[tag] = 0x80000000;
+ ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
+ ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
+ ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ return 0;
}
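
For illustration only, a standalone C decoding of the mcc_numtag word laid out above ([31] valid, [23:16] wrb, [15:8] extd_status, [7:0] compl_status); the sample value is made up:

#include <stdio.h>

int main(void)
{
	/* valid bit set, wrb = 5, extd_status = 2, compl_status = 1 */
	unsigned int numtag = 0x80000000u | (5u << 16) | (2u << 8) | 1u;

	unsigned int wrb_num = (numtag & 0x00FF0000) >> 16;
	unsigned int extd_status = (numtag & 0x0000FF00) >> 8;
	unsigned int compl_status = numtag & 0x000000FF;

	printf("valid=%u wrb=%u extd_status=%u compl_status=%u\n",
	       numtag >> 31, wrb_num, extd_status, compl_status);
	return 0;
}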
static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
@@ -89,7 +153,7 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
-static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
+void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
struct be_async_event_link_state *evt)
{
switch (evt->port_link_status) {
@@ -97,13 +161,13 @@ static void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
evt->physical_port);
phba->state |= BE_ADAPTER_LINK_DOWN;
+ iscsi_host_for_each_session(phba->shost,
+ be2iscsi_fail_session);
break;
case ASYNC_EVENT_LINK_UP:
phba->state = BE_ADAPTER_UP;
SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
evt->physical_port);
- iscsi_host_for_each_session(phba->shost,
- be2iscsi_fail_session);
break;
default:
SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
@@ -162,7 +226,6 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
-#define mcc_timeout 120000 /* 5s timeout */
int i, status;
for (i = 0; i < mcc_timeout; i++) {
status = beiscsi_process_mcc(phba);
@@ -372,9 +435,10 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
BUG_ON(atomic_read(&mccq->used) >= mccq->len);
wrb = queue_head_node(mccq);
+ memset(wrb, 0, sizeof(*wrb));
+ wrb->tag0 = (mccq->head & 0x000000FF) << 16;
queue_head_inc(mccq);
atomic_inc(&mccq->used);
- memset(wrb, 0, sizeof(*wrb));
return wrb;
}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 5de8acb924cb..49fcc787ee8b 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -425,14 +425,20 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
int be_poll_mcc(struct be_ctrl_info *ctrl);
unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
-int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr);
-
+unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba);
+void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
/*ISCSI Functuions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
int be_mcc_notify_wait(struct beiscsi_hba *phba);
+void be_mcc_notify(struct beiscsi_hba *phba);
+unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
+void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
+ struct be_async_event_link_state *evt);
+int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl);
int be_mbox_notify(struct be_ctrl_info *ctrl);
@@ -448,6 +454,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct be_queue_info *wrbq);
+bool is_link_state_evt(u32 trailer);
+
struct be_default_pdu_context {
u32 dw[4];
} __packed;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index d587b0362f18..29a3aaf35f9f 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -101,6 +101,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
struct iscsi_session *sess = cls_session->dd_data;
struct beiscsi_session *beiscsi_sess = sess->dd_data;
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n");
pci_pool_destroy(beiscsi_sess->bhs_pool);
iscsi_session_teardown(cls_session);
}
@@ -224,6 +225,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
struct beiscsi_conn *beiscsi_conn = conn->dd_data;
int len = 0;
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep) {
SE_DEBUG(DBG_LVL_1,
@@ -254,6 +256,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
struct iscsi_session *session = conn->session;
int ret;
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param);
ret = iscsi_set_param(cls_conn, param, buf, buflen);
if (ret)
return ret;
@@ -271,8 +274,8 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
conn->max_recv_dlength = 65536;
break;
case ISCSI_PARAM_MAX_BURST:
- if (session->first_burst > 262144)
- session->first_burst = 262144;
+ if (session->max_burst > 262144)
+ session->max_burst = 262144;
break;
default:
return 0;
@@ -293,12 +296,41 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
{
struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
+ struct be_cmd_resp_get_mac_addr *resp;
+ struct be_mcc_wrb *wrb;
+ unsigned int tag, wrb_num;
int len = 0;
+ unsigned short status, extd_status;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
switch (param) {
case ISCSI_HOST_PARAM_HWADDRESS:
- be_cmd_get_mac_addr(phba, phba->mac_address);
- len = sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
+ tag = be_cmd_get_mac_addr(phba);
+ if (!tag) {
+ SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed \n");
+ return -1;
+ } else
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+ SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
+ " status = %d extd_status = %d \n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -1;
+ } else {
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+ resp = embedded_payload(wrb);
+ memcpy(phba->mac_address, resp->mac_address, ETH_ALEN);
+ len = sysfs_format_mac(buf, phba->mac_address,
+ ETH_ALEN);
+ }
break;
default:
return iscsi_host_get_param(shost, param, buf);
@@ -378,6 +410,7 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_offload_params params;
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n");
memset(&params, 0, sizeof(struct beiscsi_offload_params));
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep)
@@ -422,8 +455,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ struct be_mcc_wrb *wrb;
+ struct tcp_connect_and_offload_out *ptcpcnct_out;
+ unsigned short status, extd_status;
+ unsigned int tag, wrb_num;
int ret = -1;
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n");
beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
if (beiscsi_ep->ep_cid == 0xFFFF) {
SE_DEBUG(DBG_LVL_1, "No free cid available\n");
@@ -431,15 +470,44 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
}
SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d ",
beiscsi_ep->ep_cid);
- phba->ep_array[beiscsi_ep->ep_cid] = ep;
- if (beiscsi_ep->ep_cid >
- (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl)) {
+ phba->ep_array[beiscsi_ep->ep_cid -
+ phba->fw_config.iscsi_cid_start] = ep;
+ if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
+ phba->params.cxns_per_ctrl * 2)) {
SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
return ret;
}
beiscsi_ep->cid_vld = 0;
- return mgmt_open_connection(phba, dst_addr, beiscsi_ep);
+ tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
+ if (!tag) {
+ SE_DEBUG(DBG_LVL_1,
+ "mgmt_invalidate_connection Failed for cid=%d \n",
+ beiscsi_ep->ep_cid);
+ } else {
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+ }
+ wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
+ extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
+ status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ if (status || extd_status) {
+ SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
+ " status = %d extd_status = %d \n",
+ status, extd_status);
+ free_mcc_tag(&phba->ctrl, tag);
+ return -1;
+ } else {
+ wrb = queue_get_wrb(mccq, wrb_num);
+ free_mcc_tag(&phba->ctrl, tag);
+
+ ptcpcnct_out = embedded_payload(wrb);
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
+ beiscsi_ep->cid_vld = 1;
+ SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
+ }
+ return 0;
}
/**
@@ -459,14 +527,12 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
* beiscsi_free_ep - free endpoint
* @ep: pointer to iscsi endpoint structure
*/
-static void beiscsi_free_ep(struct iscsi_endpoint *ep)
+static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
{
- struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
beiscsi_ep->phba = NULL;
- iscsi_destroy_endpoint(ep);
}
/**
@@ -495,9 +561,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
- if (phba->state) {
+ if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
- SE_DEBUG(DBG_LVL_1, "The Adapet state is Not UP \n");
+ SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP \n");
return ERR_PTR(ret);
}
@@ -509,9 +575,9 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
beiscsi_ep = ep->dd_data;
beiscsi_ep->phba = phba;
-
+ beiscsi_ep->openiscsi_ep = ep;
if (beiscsi_open_conn(ep, NULL, dst_addr, non_blocking)) {
- SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
+ SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn \n");
ret = -ENOMEM;
goto free_ep;
}
@@ -519,7 +585,7 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ep;
free_ep:
- beiscsi_free_ep(ep);
+ beiscsi_free_ep(beiscsi_ep);
return ERR_PTR(ret);
}
@@ -546,20 +612,22 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
* @ep: The iscsi endpoint
* @flag: The type of connection closure
*/
-static int beiscsi_close_conn(struct iscsi_endpoint *ep, int flag)
+static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
{
int ret = 0;
- struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
+ unsigned int tag;
struct beiscsi_hba *phba = beiscsi_ep->phba;
- if (MGMT_STATUS_SUCCESS !=
- mgmt_upload_connection(phba, beiscsi_ep->ep_cid,
- CONNECTION_UPLOAD_GRACEFUL)) {
+ tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
+ if (!tag) {
SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x",
beiscsi_ep->ep_cid);
ret = -1;
+ } else {
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+ free_mcc_tag(&phba->ctrl, tag);
}
-
return ret;
}
@@ -574,19 +642,17 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
struct beiscsi_conn *beiscsi_conn;
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_hba *phba;
- int flag = 0;
beiscsi_ep = ep->dd_data;
phba = beiscsi_ep->phba;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect\n");
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n",
+ beiscsi_ep->ep_cid);
if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
- beiscsi_close_conn(ep, flag);
}
- beiscsi_free_ep(ep);
}
/**
@@ -619,23 +685,31 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
struct iscsi_session *session = conn->session;
struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
struct beiscsi_hba *phba = iscsi_host_priv(shost);
- unsigned int status;
+ unsigned int tag;
unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
- SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop\n");
beiscsi_ep = beiscsi_conn->ep;
if (!beiscsi_ep) {
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop , no beiscsi_ep\n");
return;
}
- status = mgmt_invalidate_connection(phba, beiscsi_ep,
+ SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_stop ep_cid = %d\n",
+ beiscsi_ep->ep_cid);
+ tag = mgmt_invalidate_connection(phba, beiscsi_ep,
beiscsi_ep->ep_cid, 1,
savecfg_flag);
- if (status != MGMT_STATUS_SUCCESS) {
+ if (!tag) {
SE_DEBUG(DBG_LVL_1,
"mgmt_invalidate_connection Failed for cid=%d \n",
beiscsi_ep->ep_cid);
+ } else {
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+ free_mcc_tag(&phba->ctrl, tag);
}
+ beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL);
+ beiscsi_free_ep(beiscsi_ep);
+ iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
iscsi_conn_stop(cls_conn, flag);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index f92ffc5349fb..1f512c28cbf9 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1a557fa77888..7c22616ab141 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -40,7 +40,6 @@
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
-static unsigned int ring_mode;
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -62,10 +61,10 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
+ { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
@@ -112,6 +111,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
memset(phba, 0, sizeof(*phba));
phba->shost = shost;
phba->pcidev = pci_dev_get(pcidev);
+ pci_set_drvdata(pcidev, phba);
if (iscsi_host_add(shost, &phba->pcidev->dev))
goto free_devices;
@@ -143,6 +143,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
struct pci_dev *pcidev)
{
u8 __iomem *addr;
+ int pcicfg_reg;
addr = ioremap_nocache(pci_resource_start(pcidev, 2),
pci_resource_len(pcidev, 2));
@@ -159,13 +160,19 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
phba->db_va = addr;
phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
- addr = ioremap_nocache(pci_resource_start(pcidev, 1),
- pci_resource_len(pcidev, 1));
+ if (phba->generation == BE_GEN2)
+ pcicfg_reg = 1;
+ else
+ pcicfg_reg = 0;
+
+ addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+ pci_resource_len(pcidev, pcicfg_reg));
+
if (addr == NULL)
goto pci_map_err;
phba->ctrl.pcicfg = addr;
phba->pci_va = addr;
- phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
+ phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
return 0;
pci_map_err:
@@ -230,29 +237,27 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
- phba->params.ios_per_ctrl = BE2_IO_DEPTH;
- phba->params.cxns_per_ctrl = BE2_MAX_SESSIONS;
- phba->params.asyncpdus_per_ctrl = BE2_ASYNCPDUS;
- phba->params.icds_per_ctrl = BE2_MAX_ICDS / 2;
+ phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
+ - (phba->fw_config.iscsi_cid_count
+ + BE2_TMFS
+ + BE2_NOPOUT_REQ));
+ phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
+	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
+	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
phba->params.num_sge_per_io = BE2_SGE;
phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
phba->params.eq_timer = 64;
phba->params.num_eq_entries =
- (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
- 512) + 1) * 512;
+ (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
+ + BE2_TMFS) / 512) + 1) * 512;
phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
? 1024 : phba->params.num_eq_entries;
SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
- phba->params.num_eq_entries);
+ phba->params.num_eq_entries);
phba->params.num_cq_entries =
- (((BE2_CMDS_PER_CXN * 2 + BE2_LOGOUTS + BE2_TMFS + BE2_ASYNCPDUS) /
- 512) + 1) * 512;
- SE_DEBUG(DBG_LVL_8,
- "phba->params.num_cq_entries=%d BE2_CMDS_PER_CXN=%d"
- "BE2_LOGOUTS=%d BE2_TMFS=%d BE2_ASYNCPDUS=%d \n",
- phba->params.num_cq_entries, BE2_CMDS_PER_CXN,
- BE2_LOGOUTS, BE2_TMFS, BE2_ASYNCPDUS);
+ (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
+ + BE2_TMFS) / 512) + 1) * 512;
phba->params.wrbs_per_cxn = 256;
}
@@ -443,7 +448,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
if (phba->todo_mcc_cq)
queue_work(phba->wq, &phba->work_cqs);
- if ((num_mcceq_processed) && (!num_ioeq_processed))
+ if ((num_mcceq_processed) && (!num_ioeq_processed))
hwi_ring_eq_db(phba, eq->id, 0,
(num_ioeq_processed +
num_mcceq_processed) , 1, 1);
@@ -561,6 +566,7 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
break;
case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
task = conn->login_task;
io_task = task->dd_data;
login_hdr = (struct iscsi_hdr *)ppdu;
@@ -631,29 +637,29 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
* alloc_wrb_handle - To allocate a wrb handle
* @phba: The hba pointer
* @cid: The cid to use for allocation
- * @index: index allocation and wrb index
*
* This happens under session_lock until submission to chip
*/
-struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
- int index)
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
struct hwi_wrb_context *pwrb_context;
struct hwi_controller *phwi_ctrlr;
- struct wrb_handle *pwrb_handle;
+ struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cid];
- if (pwrb_context->wrb_handles_available) {
+ if (pwrb_context->wrb_handles_available >= 2) {
pwrb_handle = pwrb_context->pwrb_handle_base[
pwrb_context->alloc_index];
pwrb_context->wrb_handles_available--;
- pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
if (pwrb_context->alloc_index ==
(phba->params.wrbs_per_cxn - 1))
pwrb_context->alloc_index = 0;
else
pwrb_context->alloc_index++;
+ pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
+ pwrb_context->alloc_index];
+ pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
} else
pwrb_handle = NULL;
return pwrb_handle;
@@ -671,9 +677,7 @@ static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
struct wrb_handle *pwrb_handle)
{
- if (!ring_mode)
- pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
- pwrb_handle;
+ pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
pwrb_context->wrb_handles_available++;
if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
pwrb_context->free_index = 0;
@@ -790,6 +794,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
memcpy(task->sc->sense_buffer, sense,
min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
}
+
if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
& SOL_RES_CNT_MASK)
@@ -811,6 +816,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
struct iscsi_conn *conn = beiscsi_conn->conn;
hdr = (struct iscsi_logout_rsp *)task->hdr;
+ hdr->opcode = ISCSI_OP_LOGOUT_RSP;
hdr->t2wait = 5;
hdr->t2retain = 0;
hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -825,6 +831,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
& SOL_EXP_CMD_SN_MASK) +
((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->dlength[0] = 0;
+ hdr->dlength[1] = 0;
+ hdr->dlength[2] = 0;
hdr->hlength = 0;
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -839,6 +848,7 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_io_task *io_task = task->dd_data;
hdr = (struct iscsi_tm_rsp *)task->hdr;
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
& SOL_FLAGS_MASK) >> 24) | 0x80;
hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
@@ -859,7 +869,6 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
{
struct hwi_wrb_context *pwrb_context;
struct wrb_handle *pwrb_handle = NULL;
- struct sgl_handle *psgl_handle = NULL;
struct hwi_controller *phwi_ctrlr;
struct iscsi_task *task;
struct beiscsi_io_task *io_task;
@@ -867,22 +876,14 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
struct iscsi_session *session = conn->session;
phwi_ctrlr = phba->phwi_ctrlr;
- if (ring_mode) {
- psgl_handle = phba->sgl_hndl_array[((psol->
- dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
- 32] & SOL_ICD_INDEX_MASK) >> 6)];
- pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
- task = psgl_handle->task;
- pwrb_handle = NULL;
- } else {
- pwrb_context = &phwi_ctrlr->wrb_context[((psol->
+ pwrb_context = &phwi_ctrlr->wrb_context[((psol->
dw[offsetof(struct amap_sol_cqe, cid) / 32] &
- SOL_CID_MASK) >> 6)];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
+ SOL_CID_MASK) >> 6) -
+ phba->fw_config.iscsi_cid_start];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
dw[offsetof(struct amap_sol_cqe, wrb_index) /
32] & SOL_WRB_INDEX_MASK) >> 16)];
- task = pwrb_handle->pio_handle;
- }
+ task = pwrb_handle->pio_handle;
io_task = task->dd_data;
spin_lock(&phba->mgmt_sgl_lock);
@@ -923,31 +924,23 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
struct iscsi_wrb *pwrb = NULL;
struct hwi_controller *phwi_ctrlr;
struct iscsi_task *task;
- struct sgl_handle *psgl_handle = NULL;
unsigned int type;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
phwi_ctrlr = phba->phwi_ctrlr;
- if (ring_mode) {
- psgl_handle = phba->sgl_hndl_array[((psol->
- dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
- 32] & SOL_ICD_INDEX_MASK) >> 6)];
- task = psgl_handle->task;
- type = psgl_handle->type;
- } else {
- pwrb_context = &phwi_ctrlr->
- wrb_context[((psol->dw[offsetof
+ pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
(struct amap_sol_cqe, cid) / 32]
- & SOL_CID_MASK) >> 6)];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
+ & SOL_CID_MASK) >> 6) -
+ phba->fw_config.iscsi_cid_start];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
dw[offsetof(struct amap_sol_cqe, wrb_index) /
32] & SOL_WRB_INDEX_MASK) >> 16)];
- task = pwrb_handle->pio_handle;
- pwrb = pwrb_handle->pwrb;
- type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
- WRB_TYPE_MASK) >> 28;
- }
+ task = pwrb_handle->pio_handle;
+ pwrb = pwrb_handle->pwrb;
+ type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
+ WRB_TYPE_MASK) >> 28;
+
spin_lock_bh(&session->lock);
switch (type) {
case HWH_TYPE_IO:
@@ -978,15 +971,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
default:
- if (ring_mode)
- shost_printk(KERN_WARNING, phba->shost,
- "In hwi_complete_cmd, unknown type = %d"
- "icd_index 0x%x CID 0x%x\n", type,
- ((psol->dw[offsetof(struct amap_sol_cqe_ring,
- icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
- psgl_handle->cid);
- else
- shost_printk(KERN_WARNING, phba->shost,
+ shost_printk(KERN_WARNING, phba->shost,
"In hwi_complete_cmd, unknown type = %d"
"wrb_index 0x%x CID 0x%x\n", type,
((psol->dw[offsetof(struct amap_iscsi_wrb,
@@ -1077,7 +1062,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
WARN_ON(!pasync_handle);
- pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid;
+ pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
+ phba->fw_config.iscsi_cid_start;
pasync_handle->is_header = is_header;
pasync_handle->buffer_len = ((pdpdu_cqe->
dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
@@ -1327,9 +1313,10 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
}
status = beiscsi_process_async_pdu(beiscsi_conn, phba,
- beiscsi_conn->beiscsi_conn_cid,
- phdr, hdr_len, pfirst_buffer,
- buf_len);
+ (beiscsi_conn->beiscsi_conn_cid -
+ phba->fw_config.iscsi_cid_start),
+ phdr, hdr_len, pfirst_buffer,
+ buf_len);
if (status == 0)
hwi_free_async_msg(phba, cri);
@@ -1422,6 +1409,48 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
hwi_post_async_buffers(phba, pasync_handle->is_header);
}
+static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
+{
+ struct be_queue_info *mcc_cq;
+ struct be_mcc_compl *mcc_compl;
+ unsigned int num_processed = 0;
+
+ mcc_cq = &phba->ctrl.mcc_obj.cq;
+ mcc_compl = queue_tail_node(mcc_cq);
+ mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
+ while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
+
+ if (num_processed >= 32) {
+ hwi_ring_cq_db(phba, mcc_cq->id,
+ num_processed, 0, 0);
+ num_processed = 0;
+ }
+ if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ if (is_link_state_evt(mcc_compl->flags))
+				/* Interpret compl as an async link event */
+ beiscsi_async_link_state_process(phba,
+ (struct be_async_event_link_state *) mcc_compl);
+ else
+ SE_DEBUG(DBG_LVL_1,
+ " Unsupported Async Event, flags"
+ " = 0x%08x \n", mcc_compl->flags);
+ } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
+ be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
+ atomic_dec(&phba->ctrl.mcc_obj.q.used);
+ }
+
+ mcc_compl->flags = 0;
+ queue_tail_inc(mcc_cq);
+ mcc_compl = queue_tail_node(mcc_cq);
+ mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
+ num_processed++;
+ }
+
+ if (num_processed > 0)
+ hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
+
+}
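The new MCC handler above follows the driver's drain-and-rearm pattern. A minimal sketch of that pattern; cq_entry_valid()/consume_cq_entry() are hypothetical stand-ins for the flag checks done inline above, and the fourth hwi_ring_cq_db() argument is read as the re-arm flag, an inference from how it is called here:

/* Sketch only; the two cq_* helpers are hypothetical. */
static void drain_cq_sketch(struct beiscsi_hba *phba, struct be_queue_info *cq)
{
	unsigned int num_processed = 0;

	while (cq_entry_valid(cq)) {			/* hypothetical */
		consume_cq_entry(cq);			/* hypothetical */
		if (++num_processed >= 32) {
			/* credit consumed entries, leave the IRQ un-armed */
			hwi_ring_cq_db(phba, cq->id, num_processed, 0, 0);
			num_processed = 0;
		}
	}
	if (num_processed)
		/* final ring re-arms the completion interrupt */
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
}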
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
@@ -1431,7 +1460,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
unsigned int num_processed = 0;
unsigned int tot_nump = 0;
struct beiscsi_conn *beiscsi_conn;
- struct sgl_handle *psgl_handle = NULL;
+ struct beiscsi_endpoint *beiscsi_ep;
+ struct iscsi_endpoint *ep;
struct beiscsi_hba *phba;
cq = pbe_eq->cq;
@@ -1442,32 +1472,13 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
CQE_VALID_MASK) {
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
-		if (ring_mode) {
-			psgl_handle = phba->sgl_hndl_array[((sol->
-				dw[offsetof(struct amap_sol_cqe_ring,
-				icd_index) / 32] & SOL_ICD_INDEX_MASK)
-				>> 6)];
-			beiscsi_conn = phba->conn_table[psgl_handle->cid];
-			if (!beiscsi_conn || !beiscsi_conn->ep) {
-				shost_printk(KERN_WARNING, phba->shost,
-					"Connection table empty for cid = %d\n",
-					psgl_handle->cid);
-				return 0;
-			}
-		} else {
-			beiscsi_conn = phba->conn_table[(u32) (sol->
-				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
-				SOL_CID_MASK) >> 6];
-
-			if (!beiscsi_conn || !beiscsi_conn->ep) {
-				shost_printk(KERN_WARNING, phba->shost,
-					"Connection table empty for cid = %d\n",
-					(u32)(sol->dw[offsetof(struct amap_sol_cqe,
-					cid) / 32] & SOL_CID_MASK) >> 6);
-				return 0;
-			}
-		}
+		ep = phba->ep_array[(u32) ((sol->
+			dw[offsetof(struct amap_sol_cqe, cid) / 32] &
+			SOL_CID_MASK) >> 6) -
+			phba->fw_config.iscsi_cid_start];
+		beiscsi_ep = ep->dd_data;
+		beiscsi_conn = beiscsi_ep->conn;
if (num_processed >= 32) {
hwi_ring_cq_db(phba, cq->id,
@@ -1511,21 +1522,13 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CMD_CXN_KILLED_ITT_INVALID:
case CMD_CXN_KILLED_SEQ_OUTOFORDER:
case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
- if (ring_mode) {
- SE_DEBUG(DBG_LVL_1,
- "CQ Error notification for cmd.. "
- "code %d cid 0x%x\n",
- sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK, psgl_handle->cid);
- } else {
- SE_DEBUG(DBG_LVL_1,
+ SE_DEBUG(DBG_LVL_1,
"CQ Error notification for cmd.. "
"code %d cid 0x%x\n",
sol->dw[offsetof(struct amap_sol_cqe, code) /
32] & CQE_CODE_MASK,
(sol->dw[offsetof(struct amap_sol_cqe, cid) /
32] & SOL_CID_MASK));
- }
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
SE_DEBUG(DBG_LVL_1,
@@ -1547,37 +1550,23 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_OVER_RUN_RESIDUAL:
case CXN_KILLED_UNDER_RUN_RESIDUAL:
case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
- if (ring_mode) {
- SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
- "0x%x...\n",
- sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK, psgl_handle->cid);
- } else {
- SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
+ SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
"0x%x...\n",
sol->dw[offsetof(struct amap_sol_cqe, code) /
32] & CQE_CODE_MASK,
- sol->dw[offsetof(struct amap_sol_cqe, cid) /
- 32] & CQE_CID_MASK);
- }
+ (sol->dw[offsetof(struct amap_sol_cqe, cid) /
+ 32] & CQE_CID_MASK));
iscsi_conn_failure(beiscsi_conn->conn,
ISCSI_ERR_CONN_FAILED);
break;
case CXN_KILLED_RST_SENT:
case CXN_KILLED_RST_RCVD:
- if (ring_mode) {
- SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
- "received/sent on CID 0x%x...\n",
- sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK, psgl_handle->cid);
- } else {
- SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
+			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
"received/sent on CID 0x%x...\n",
sol->dw[offsetof(struct amap_sol_cqe, code) /
32] & CQE_CODE_MASK,
- sol->dw[offsetof(struct amap_sol_cqe, cid) /
- 32] & CQE_CID_MASK);
- }
+ (sol->dw[offsetof(struct amap_sol_cqe, cid) /
+ 32] & CQE_CID_MASK));
iscsi_conn_failure(beiscsi_conn->conn,
ISCSI_ERR_CONN_FAILED);
break;
@@ -1586,8 +1575,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
"received on CID 0x%x...\n",
sol->dw[offsetof(struct amap_sol_cqe, code) /
32] & CQE_CODE_MASK,
- sol->dw[offsetof(struct amap_sol_cqe, cid) /
- 32] & CQE_CID_MASK);
+ (sol->dw[offsetof(struct amap_sol_cqe, cid) /
+ 32] & CQE_CID_MASK));
break;
}
@@ -1604,7 +1593,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
return tot_nump;
}
-static void beiscsi_process_all_cqs(struct work_struct *work)
+void beiscsi_process_all_cqs(struct work_struct *work)
{
unsigned long flags;
struct hwi_controller *phwi_ctrlr;
@@ -1624,6 +1613,7 @@ static void beiscsi_process_all_cqs(struct work_struct *work)
spin_lock_irqsave(&phba->isr_lock, flags);
phba->todo_mcc_cq = 0;
spin_unlock_irqrestore(&phba->isr_lock, flags);
+ beiscsi_process_mcc_isr(phba);
}
if (phba->todo_cq) {
@@ -1668,7 +1658,8 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
io_task->bhs_pa.u.a32.address_hi);
l_sg = sg;
- for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) {
+ for (index = 0; (index < num_sg) && (index < 2); index++,
+ sg = sg_next(sg)) {
if (index == 0) {
sg_len = sg_dma_len(sg);
addr = (u64) sg_dma_address(sg);
@@ -1679,11 +1670,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
sg_len);
sge_len = sg_len;
- AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
- 1);
} else {
- AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
- 0);
AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
pwrb, sge_len);
sg_len = sg_dma_len(sg);
@@ -1706,13 +1693,27 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
io_task->bhs_pa.u.a32.address_lo);
- if (num_sg == 2)
- AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
+ if (num_sg == 1) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+ 0);
+ } else if (num_sg == 2) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+ 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
+ 0);
+ }
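Both loop fixes in this function come down to the same point: sg_next() returns the next scatterlist entry without advancing its argument, so the old `index++, sg_next(sg)` increment silently reprocessed the first segment on every pass. A sketch of the corrected walk:

	struct scatterlist *sg = l_sg;
	unsigned int index;

	for (index = 0; index < num_sg; index++, sg = sg_next(sg)) {
		/* each pass now really sees the next mapped segment */
		u64 addr = (u64) sg_dma_address(sg);
		unsigned int sg_len = sg_dma_len(sg);
		/* ... program addr/sg_len into the next SGE ... */
	}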
sg = l_sg;
psgl++;
psgl++;
offset = 0;
- for (index = 0; index < num_sg; index++, sg_next(sg), psgl++) {
+ for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
sg_len = sg_dma_len(sg);
addr = (u64) sg_dma_address(sg);
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
@@ -2048,10 +2049,9 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
}
idx = 0;
pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
- num_cxn_wrb =
- ((mem_descr_wrb->mem_array[idx].size) / (sizeof(struct iscsi_wrb)) *
- phba->params.wrbs_per_cxn);
-
+ num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+ ((sizeof(struct iscsi_wrb) *
+ phba->params.wrbs_per_cxn));
for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
pwrb_context = &phwi_ctrlr->wrb_context[index];
if (num_cxn_wrb) {
@@ -2064,9 +2064,9 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
} else {
idx++;
pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
- num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
- (sizeof(struct iscsi_wrb)) *
- phba->params.wrbs_per_cxn);
+ num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
+ ((sizeof(struct iscsi_wrb) *
+ phba->params.wrbs_per_cxn));
for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
pwrb_handle = pwrb_context->pwrb_handle_base[j];
pwrb_handle->pwrb = pwrb;
@@ -2383,7 +2383,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
&paddr);
if (!cq_vaddress)
goto create_cq_error;
- ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
+ ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
shost_printk(KERN_ERR, phba->shost,
@@ -2634,7 +2634,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
"wrbq create failed.");
return status;
}
- phwi_ctrlr->wrb_context[i].cid = phwi_context->be_wrbq[i].id;
+ phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
+ id;
}
kfree(pwrb_arr);
return 0;
@@ -2803,17 +2804,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
goto error;
}
- if (phba->fw_config.iscsi_features == 0x1)
- ring_mode = 1;
- else
- ring_mode = 0;
- status = mgmt_get_fw_config(ctrl, phba);
- if (status != 0) {
- shost_printk(KERN_ERR, phba->shost,
- "Error getting fw config\n");
- goto error;
- }
-
status = beiscsi_create_cqs(phba, phwi_context);
if (status != 0) {
shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
@@ -2941,17 +2931,6 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
phba->io_sgl_hndl_avbl = 0;
phba->eh_sgl_hndl_avbl = 0;
- if (ring_mode) {
- phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
- phba->params.icds_per_ctrl,
- GFP_KERNEL);
- if (!phba->sgl_hndl_array) {
- shost_printk(KERN_ERR, phba->shost,
- "Mem Alloc Failed. Failing to load\n");
- return -ENOMEM;
- }
- }
-
mem_descr_sglh = phba->init_mem;
mem_descr_sglh += HWI_MEM_SGLH;
if (1 == mem_descr_sglh->num_elements) {
@@ -2959,8 +2938,6 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
phba->params.ios_per_ctrl,
GFP_KERNEL);
if (!phba->io_sgl_hndl_base) {
- if (ring_mode)
- kfree(phba->sgl_hndl_array);
shost_printk(KERN_ERR, phba->shost,
"Mem Alloc Failed. Failing to load\n");
return -ENOMEM;
@@ -3032,7 +3009,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
pfrag += phba->params.num_sge_per_io;
psgl_handle->sgl_index =
- phba->fw_config.iscsi_cid_start + arr_index++;
+ phba->fw_config.iscsi_icd_start + arr_index++;
}
idx++;
}
@@ -3047,7 +3024,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
int i, new_cid;
- phba->cid_array = kmalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
+ phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
GFP_KERNEL);
if (!phba->cid_array) {
shost_printk(KERN_ERR, phba->shost,
@@ -3055,7 +3032,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
"hba_setup_cid_tbls\n");
return -ENOMEM;
}
- phba->ep_array = kmalloc(sizeof(struct iscsi_endpoint *) *
+ phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
if (!phba->ep_array) {
shost_printk(KERN_ERR, phba->shost,
@@ -3064,7 +3041,7 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
kfree(phba->cid_array);
return -ENOMEM;
}
- new_cid = phba->fw_config.iscsi_icd_start;
+ new_cid = phba->fw_config.iscsi_cid_start;
for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
phba->cid_array[i] = new_cid;
new_cid += 2;
@@ -3145,8 +3122,6 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
if (hba_setup_cid_tbls(phba)) {
shost_printk(KERN_ERR, phba->shost,
"Failed in hba_setup_cid_tbls\n");
- if (ring_mode)
- kfree(phba->sgl_hndl_array);
kfree(phba->io_sgl_hndl_base);
kfree(phba->eh_sgl_hndl_base);
goto do_cleanup_ctrlr;
@@ -3166,6 +3141,7 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
struct be_queue_info *eq;
struct be_eq_entry *eqe = NULL;
int i, eq_msix;
+ unsigned int num_processed;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3177,13 +3153,17 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
eq = &phwi_context->be_eq[i].q;
eqe = queue_tail_node(eq);
-
+ num_processed = 0;
while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
& EQE_VALID_MASK) {
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
queue_tail_inc(eq);
eqe = queue_tail_node(eq);
+ num_processed++;
}
+
+ if (num_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
}
}
@@ -3195,10 +3175,9 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
if (mgmt_status)
shost_printk(KERN_WARNING, phba->shost,
"mgmt_epfw_cleanup FAILED \n");
- hwi_cleanup(phba);
+
hwi_purge_eq(phba);
- if (ring_mode)
- kfree(phba->sgl_hndl_array);
+ hwi_cleanup(phba);
kfree(phba->io_sgl_hndl_base);
kfree(phba->eh_sgl_hndl_base);
kfree(phba->cid_array);
@@ -3219,7 +3198,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
* We can always use 0 here because it is reserved by libiscsi for
* login/startup related tasks.
*/
- pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 0);
+ pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
+ phba->fw_config.iscsi_cid_start));
pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
memset(pwrb, 0, sizeof(*pwrb));
AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
@@ -3283,8 +3263,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
- if (!ring_mode)
- doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
+ doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
<< DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
@@ -3328,8 +3307,9 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->bhs_pa.u.a64.address = paddr;
io_task->libiscsi_itt = (itt_t)task->itt;
io_task->pwrb_handle = alloc_wrb_handle(phba,
- beiscsi_conn->beiscsi_conn_cid,
- task->itt);
+ beiscsi_conn->beiscsi_conn_cid -
+ phba->fw_config.iscsi_cid_start
+ );
io_task->conn = beiscsi_conn;
task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
@@ -3343,7 +3323,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
goto free_hndls;
} else {
io_task->scsi_cmnd = NULL;
- if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+ if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
if (!beiscsi_conn->login_in_progress) {
spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = (struct sgl_handle *)
@@ -3370,21 +3350,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
wrb_index << 16) | (unsigned int)
(io_task->psgl_handle->sgl_index));
- if (ring_mode) {
- phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
- phba->fw_config.iscsi_cid_start] =
- io_task->psgl_handle;
- io_task->psgl_handle->task = task;
- io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid;
- } else
- io_task->pwrb_handle->pio_handle = task;
+ io_task->pwrb_handle->pio_handle = task;
io_task->cmd_bhs->iscsi_hdr.itt = itt;
return 0;
free_hndls:
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+ pwrb_context = &phwi_ctrlr->wrb_context[
+ beiscsi_conn->beiscsi_conn_cid -
+ phba->fw_config.iscsi_cid_start];
free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
io_task->pwrb_handle = NULL;
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -3404,7 +3379,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
struct hwi_controller *phwi_ctrlr;
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid];
+ pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
+ - phba->fw_config.iscsi_cid_start];
if (io_task->pwrb_handle) {
free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
io_task->pwrb_handle = NULL;
@@ -3460,18 +3436,12 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
ISCSI_OPCODE_SCSI_DATA_OUT);
AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
&io_task->cmd_bhs->iscsi_data_pdu, 1);
- if (ring_mode)
- io_task->psgl_handle->type = INI_WR_CMD;
- else
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ INI_WR_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
} else {
- if (ring_mode)
- io_task->psgl_handle->type = INI_RD_CMD;
- else
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ INI_RD_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
}
memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
@@ -3496,8 +3466,7 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
- if (!ring_mode)
- doorbell |= (io_task->pwrb_handle->wrb_index &
+ doorbell |= (io_task->pwrb_handle->wrb_index &
DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
@@ -3519,49 +3488,46 @@ static int beiscsi_mtask(struct iscsi_task *task)
unsigned int doorbell = 0;
unsigned int i, cid;
struct iscsi_task *aborted_task;
+ unsigned int tag;
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
+ memset(pwrb, 0, sizeof(*pwrb));
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
be32_to_cpu(task->cmdsn));
AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
io_task->pwrb_handle->wrb_index);
AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
io_task->psgl_handle->sgl_index);
-
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_LOGIN:
- if (ring_mode)
- io_task->psgl_handle->type = TGT_DM_CMD;
- else
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ TGT_DM_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_NOOP_OUT:
- if (ring_mode)
- io_task->psgl_handle->type = INI_RD_CMD;
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ INI_RD_CMD);
+ if (task->hdr->ttt == ISCSI_RESERVED_TAG)
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
else
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_TEXT:
- if (ring_mode)
- io_task->psgl_handle->type = INI_WR_CMD;
- else
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_WR_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ TGT_DM_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
session = conn->session;
i = ((struct iscsi_tm *)task->hdr)->rtt;
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[cid];
+ pwrb_context = &phwi_ctrlr->wrb_context[cid -
+ phba->fw_config.iscsi_cid_start];
pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
>> 16];
aborted_task = pwrb_handle->pio_handle;
@@ -3572,22 +3538,25 @@ static int beiscsi_mtask(struct iscsi_task *task)
if (!aborted_io_task->scsi_cmnd)
return 0;
- mgmt_invalidate_icds(phba,
+ tag = mgmt_invalidate_icds(phba,
aborted_io_task->psgl_handle->sgl_index,
cid);
- if (ring_mode)
- io_task->psgl_handle->type = INI_TMF_CMD;
- else
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_TMF_CMD);
+ if (!tag) {
+ shost_printk(KERN_WARNING, phba->shost,
+ "mgmt_invalidate_icds could not be"
+ " submitted\n");
+ } else {
+ wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag]);
+ free_mcc_tag(&phba->ctrl, tag);
+ }
+ AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+ INI_TMF_CMD);
AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_LOGOUT:
AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
-		if (ring_mode)
-			io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
-		else
-			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-					HWH_TYPE_LOGOUT);
+		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
+			      HWH_TYPE_LOGOUT);
hwi_write_buffer(pwrb, task);
@@ -3600,14 +3569,13 @@ static int beiscsi_mtask(struct iscsi_task *task)
}
AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
- be32_to_cpu(task->data_count));
+ task->data_count);
AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
io_task->pwrb_handle->nxt_wrb_index);
be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
doorbell |= cid & DB_WRB_POST_CID_MASK;
- if (!ring_mode)
- doorbell |= (io_task->pwrb_handle->wrb_index &
+ doorbell |= (io_task->pwrb_handle->wrb_index &
DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
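With the ring-mode branch gone, all three WRB-post paths (offload, I/O and mgmt task) compose the TX doorbell word the same way; gathered from the code above:

	unsigned int doorbell = 0;

	doorbell |= cid & DB_WRB_POST_CID_MASK;		/* target connection */
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;	/* one WRB posted */
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);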
@@ -3649,7 +3617,6 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
}
-
static void beiscsi_remove(struct pci_dev *pcidev)
{
struct beiscsi_hba *phba = NULL;
@@ -3734,7 +3701,20 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
}
SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
- pci_set_drvdata(pcidev, phba);
+ switch (pcidev->device) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ case OC_DEVICE_ID2:
+ phba->generation = BE_GEN2;
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID3:
+ phba->generation = BE_GEN3;
+ break;
+ default:
+ phba->generation = 0;
+ }
+
if (enable_msix)
num_cpus = find_num_cpus();
else
@@ -3754,7 +3734,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
spin_lock_init(&phba->io_sgl_lock);
spin_lock_init(&phba->mgmt_sgl_lock);
spin_lock_init(&phba->isr_lock);
+ ret = mgmt_get_fw_config(&phba->ctrl, phba);
+ if (ret != 0) {
+ shost_printk(KERN_ERR, phba->shost,
+ "Error getting fw config\n");
+ goto free_port;
+ }
+ phba->shost->max_id = phba->fw_config.iscsi_cid_count;
beiscsi_get_params(phba);
+ phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3762,6 +3750,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_port;
}
+ for (i = 0; i < MAX_MCC_CMD ; i++) {
+ init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+ phba->ctrl.mcc_tag[i] = i + 1;
+ phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_available++;
+ }
+
+ phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
+
snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
phba->shost->host_no);
phba->wq = create_workqueue(phba->wq_name);
@@ -3836,7 +3833,7 @@ disable_pci:
struct iscsi_transport beiscsi_iscsi_transport = {
.owner = THIS_MODULE,
.name = DRV_NAME,
- .caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
+ .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
.param_mask = ISCSI_MAX_RECV_DLENGTH |
ISCSI_MAX_XMIT_DLENGTH |
@@ -3859,7 +3856,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
ISCSI_USERNAME | ISCSI_PASSWORD |
ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
- ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
+ ISCSI_LU_RESET_TMO |
ISCSI_PING_TMO | ISCSI_RECV_TMO |
ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
@@ -3905,7 +3902,7 @@ static int __init beiscsi_module_init(void)
SE_DEBUG(DBG_LVL_1,
"beiscsi_module_init - Unable to register beiscsi"
"transport.\n");
- ret = -ENOMEM;
+ return -ENOMEM;
}
SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
&beiscsi_iscsi_transport);
@@ -3917,7 +3914,6 @@ static int __init beiscsi_module_init(void)
"beiscsi pci driver.\n");
goto unregister_iscsi_transport;
}
- ring_mode = 0;
return 0;
unregister_iscsi_transport:
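Nearly every be_main.c hunk above makes the same substitution: the firmware hands out connection IDs starting at fw_config.iscsi_cid_start, while wrb_context[], ep_array[] and the async-PDU cri values are zero-based, so each lookup subtracts the base first. A hypothetical helper capturing the idiom the driver open-codes:

static inline unsigned int beiscsi_cid_to_index(struct beiscsi_hba *phba,
						unsigned int cid)
{
	/* e.g. pwrb_context = &phwi_ctrlr->wrb_context[
	 *			beiscsi_cid_to_index(phba, cid)]; */
	return cid - phba->fw_config.iscsi_cid_start;
}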
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 25e6b208b771..c53a80ab796c 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -40,31 +40,29 @@
#define DRV_DESC BE_NAME " " "Driver"
#define BE_VENDOR_ID 0x19A2
+/* DEVICE ID's for BE2 */
#define BE_DEVICE_ID1 0x212
#define OC_DEVICE_ID1 0x702
#define OC_DEVICE_ID2 0x703
+
+/* DEVICE ID's for BE3 */
+#define BE_DEVICE_ID2 0x222
#define OC_DEVICE_ID3 0x712
-#define OC_DEVICE_ID4 0x222
-#define BE2_MAX_SESSIONS 64
+#define BE2_IO_DEPTH 1024
+#define BE2_MAX_SESSIONS 256
#define BE2_CMDS_PER_CXN 128
-#define BE2_LOGOUTS BE2_MAX_SESSIONS
#define BE2_TMFS 16
#define BE2_NOPOUT_REQ 16
-#define BE2_ASYNCPDUS BE2_MAX_SESSIONS
-#define BE2_MAX_ICDS 2048
#define BE2_SGE 32
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
-#define BE2_IO_DEPTH \
- (BE2_MAX_ICDS / 2 - (BE2_LOGOUTS + BE2_TMFS + BE2_NOPOUT_REQ))
#define MAX_CPUS 31
-#define BEISCSI_SGLIST_ELEMENTS BE2_SGE
+#define BEISCSI_SGLIST_ELEMENTS 30
-#define BEISCSI_MAX_CMNDS 1024 /* Max IO's per Ctrlr sht->can_queue */
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */
+#define BEISCSI_MAX_SECTORS 256 /* scsi_host->max_sectors */
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
@@ -330,6 +328,7 @@ struct beiscsi_hba {
	struct workqueue_struct *wq;	/* The actual work queue */
struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
+ unsigned int generation;
};
struct beiscsi_session {
@@ -656,11 +655,12 @@ struct amap_iscsi_wrb {
} __packed;
-struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
- int index);
+struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
+void beiscsi_process_all_cqs(struct work_struct *work);
+
struct pdu_nop_out {
u32 dw[12];
};
@@ -802,7 +802,6 @@ struct hwi_controller {
struct be_ring default_pdu_hdr;
struct be_ring default_pdu_data;
struct hwi_context_memory *phwi_ctxt;
- unsigned short cq_errors[CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN];
};
enum hwh_type_enum {
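Per the prototype change above, alloc_wrb_handle() no longer takes a per-task index; callers now pass only the zero-based CID, as the be_main.c hunks do:

	pwrb_handle = alloc_wrb_handle(phba,
				       beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start);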
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 79c2bd525a84..317bcd042ced 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -48,6 +48,14 @@ unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
pfw_cfg->ulp[0].sq_base;
phba->fw_config.iscsi_cid_count =
pfw_cfg->ulp[0].sq_count;
+ if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
+ SE_DEBUG(DBG_LVL_8,
+ "FW reported MAX CXNS as %d \t"
+ "Max Supported = %d.\n",
+ phba->fw_config.iscsi_cid_count,
+ BE2_MAX_SESSIONS);
+ phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
+ }
} else {
shost_printk(KERN_WARNING, phba->shost,
"Failed in mgmt_get_fw_config \n");
@@ -77,6 +85,7 @@ unsigned char mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
}
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
req = nonemb_cmd.va;
+ memset(req, 0, sizeof(*req));
spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
@@ -140,10 +149,17 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
{
struct be_dma_mem nonemb_cmd;
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct be_sge *sge = nonembedded_sgl(wrb);
+ struct be_mcc_wrb *wrb;
+ struct be_sge *sge;
struct invalidate_commands_params_in *req;
- int status = 0;
+ unsigned int tag = 0;
+
+ spin_lock(&ctrl->mbox_lock);
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
sizeof(struct invalidate_commands_params_in),
@@ -156,8 +172,10 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
req = nonemb_cmd.va;
- spin_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
+ memset(req, 0, sizeof(*req));
+ wrb = wrb_from_mccq(phba);
+ sge = nonembedded_sgl(wrb);
+ wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -172,14 +190,12 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd.size);
- status = be_mcc_notify_wait(phba);
- if (status)
- SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
+ be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
- return status;
+ return tag;
}
unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
@@ -189,13 +205,19 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
unsigned short savecfg_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct iscsi_invalidate_connection_params_in *req =
- embedded_payload(wrb);
- int status = 0;
+ struct be_mcc_wrb *wrb;
+ struct iscsi_invalidate_connection_params_in *req;
+ unsigned int tag = 0;
spin_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+ wrb = wrb_from_mccq(phba);
+ wrb->tag0 |= tag;
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
@@ -208,35 +230,37 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
else
req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
req->save_cfg = savecfg_flag;
- status = be_mcc_notify_wait(phba);
- if (status)
- SE_DEBUG(DBG_LVL_1, "Invalidation Failed\n");
-
+ be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- return status;
+ return tag;
}
unsigned char mgmt_upload_connection(struct beiscsi_hba *phba,
unsigned short cid, unsigned int upload_flag)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct tcp_upload_params_in *req = embedded_payload(wrb);
- int status = 0;
+ struct be_mcc_wrb *wrb;
+ struct tcp_upload_params_in *req;
+ unsigned int tag = 0;
spin_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
req->id = (unsigned short)cid;
req->upload_type = (unsigned char)upload_flag;
- status = be_mcc_notify_wait(phba);
- if (status)
- SE_DEBUG(DBG_LVL_1, "mgmt_upload_connection Failed\n");
+ be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- return status;
+ return tag;
}
int mgmt_open_connection(struct beiscsi_hba *phba,
@@ -248,13 +272,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
struct sockaddr_in *daddr_in = (struct sockaddr_in *)dst_addr;
struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct tcp_connect_and_offload_in *req = embedded_payload(wrb);
+ struct be_mcc_wrb *wrb;
+ struct tcp_connect_and_offload_in *req;
unsigned short def_hdr_id;
unsigned short def_data_id;
struct phys_addr template_address = { 0, 0 };
struct phys_addr *ptemplate_address;
- int status = 0;
+ unsigned int tag = 0;
unsigned int i;
unsigned short cid = beiscsi_ep->ep_cid;
@@ -266,7 +290,14 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
spin_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -311,46 +342,36 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
req->do_offload = 1;
req->dataout_template_pa.lo = ptemplate_address->lo;
req->dataout_template_pa.hi = ptemplate_address->hi;
- status = be_mcc_notify_wait(phba);
- if (!status) {
- struct iscsi_endpoint *ep;
- struct tcp_connect_and_offload_out *ptcpcnct_out =
- embedded_payload(wrb);
-
- ep = phba->ep_array[ptcpcnct_out->cid];
- beiscsi_ep = ep->dd_data;
- beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
- beiscsi_ep->cid_vld = 1;
- SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
- } else
- SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed\n");
+ be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- return status;
+ return tag;
}
-int be_cmd_get_mac_addr(struct beiscsi_hba *phba, u8 *mac_addr)
+unsigned int be_cmd_get_mac_addr(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct be_cmd_req_get_mac_addr *req = embedded_payload(wrb);
- int status;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_mac_addr *req;
+ unsigned int tag = 0;
SE_DEBUG(DBG_LVL_8, "In be_cmd_get_mac_addr\n");
spin_lock(&ctrl->mbox_lock);
- memset(wrb, 0, sizeof(*wrb));
+ tag = alloc_mcc_tag(phba);
+ if (!tag) {
+ spin_unlock(&ctrl->mbox_lock);
+ return tag;
+ }
+
+ wrb = wrb_from_mccq(phba);
+ req = embedded_payload(wrb);
+ wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
sizeof(*req));
- status = be_mcc_notify_wait(phba);
- if (!status) {
- struct be_cmd_resp_get_mac_addr *resp = embedded_payload(wrb);
-
- memcpy(mac_addr, resp->mac_address, ETH_ALEN);
- }
-
+ be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- return status;
+ return tag;
}
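The mgmt_* conversions above all follow one shape: take mbox_lock, pull a tag with alloc_mcc_tag(), stamp it into wrb->tag0, fire the command with be_mcc_notify(), and hand the tag back so the caller can sleep until the MCC completion posts mcc_numtag[tag]. The caller side, as beiscsi_mtask() now does for the ICD invalidate:

	unsigned int tag = mgmt_invalidate_icds(phba, icd, cid);

	if (!tag) {
		/* no free MCC tag: the command was never submitted */
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be submitted\n");
	} else {
		/* the MCC completion path wakes us and sets mcc_numtag[tag] */
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}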
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 24eaff923f85..ecead6a5aa56 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -231,6 +231,7 @@ struct beiscsi_endpoint {
struct beiscsi_hba *phba;
struct beiscsi_sess *sess;
struct beiscsi_conn *conn;
+ struct iscsi_endpoint *openiscsi_ep;
unsigned short ip_type;
char dst6_addr[ISCSI_ADDRESS_BUF_LEN];
unsigned long dst_addr;
@@ -249,7 +250,4 @@ unsigned char mgmt_invalidate_connection(struct beiscsi_hba *phba,
unsigned short issue_reset,
unsigned short savecfg_flag);
-unsigned char mgmt_fw_cmd(struct be_ctrl_info *ctrl,
- struct beiscsi_hba *phba,
- char *buf, unsigned int len);
#endif
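The new openiscsi_ep back-pointer completes the lookup chain that beiscsi_process_cq() now depends on: a CQE's CID indexes ep_array[], the endpoint's dd_data is the beiscsi_endpoint, and that in turn points at the connection. Sketched from the be_main.c hunk:

	struct iscsi_endpoint *ep;
	struct beiscsi_endpoint *beiscsi_ep;
	struct beiscsi_conn *beiscsi_conn;

	ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
	beiscsi_ep = ep->dd_data;	/* driver-private endpoint data */
	beiscsi_conn = beiscsi_ep->conn;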
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 33b2294625bb..1c4d1215769d 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1426,8 +1426,8 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
break;
case ISCSI_PARAM_CONN_ADDRESS:
if (bnx2i_conn->ep)
- len = sprintf(buf, NIPQUAD_FMT "\n",
- NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+ len = sprintf(buf, "%pI4\n",
+ &bnx2i_conn->ep->cm_sk->dst_ip);
break;
default:
return iscsi_conn_get_param(cls_conn, param, buf);
@@ -1990,6 +1990,7 @@ static struct scsi_host_template bnx2i_host_template = {
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_target_reset,
+ .change_queue_depth = iscsi_change_queue_depth,
.can_queue = 1024,
.max_sectors = 127,
.cmd_per_lun = 32,
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 9129bcf117cf..cd05e049d5f6 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -219,18 +219,15 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len)
break;
}
sa = (cdbp[8] << 8) + cdbp[9];
- name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
- if (name) {
+ name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, sa);
+ if (name)
printk("%s", name);
- if ((cdb_len > 0) && (len != cdb_len))
- printk(", in_cdb_len=%d, ext_len=%d",
- len, cdb_len);
- } else {
+ else
printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
- if ((cdb_len > 0) && (len != cdb_len))
- printk(", in_cdb_len=%d, ext_len=%d",
- len, cdb_len);
- }
+
+ if ((cdb_len > 0) && (len != cdb_len))
+ printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
+
break;
case MAINTENANCE_IN:
sa = cdbp[1] & 0x1f;
@@ -349,6 +346,9 @@ void scsi_print_command(struct scsi_cmnd *cmd)
{
int k;
+ if (cmd->cmnd == NULL)
+ return;
+
scmd_printk(KERN_INFO, cmd, "CDB: ");
print_opcode_name(cmd->cmnd, cmd->cmd_len);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 969c83162cc4..412853c65372 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -591,8 +591,7 @@ static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session,
cxgb3i_conn_max_recv_dlength(conn);
spin_lock_bh(&conn->session->lock);
- sprintf(conn->portal_address, NIPQUAD_FMT,
- NIPQUAD(c3cn->daddr.sin_addr.s_addr));
+ sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr);
conn->portal_port = ntohs(c3cn->daddr.sin_port);
spin_unlock_bh(&conn->session->lock);
@@ -709,6 +708,12 @@ static int cxgb3i_host_set_param(struct Scsi_Host *shost,
{
struct cxgb3i_hba *hba = iscsi_host_priv(shost);
+ if (!hba->ndev) {
+ shost_printk(KERN_ERR, shost, "Could not set host param. "
+ "Netdev for host not set.\n");
+ return -ENODEV;
+ }
+
cxgb3i_api_debug("param %d, buf %s.\n", param, buf);
switch (param) {
@@ -739,6 +744,12 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
struct cxgb3i_hba *hba = iscsi_host_priv(shost);
int len = 0;
+ if (!hba->ndev) {
+		shost_printk(KERN_ERR, shost, "Could not get host param. "
+ "Netdev for host not set.\n");
+ return -ENODEV;
+ }
+
cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param);
switch (param) {
@@ -753,7 +764,7 @@ static int cxgb3i_host_get_param(struct Scsi_Host *shost,
__be32 addr;
addr = cxgb3i_get_private_ipv4addr(hba->ndev);
- len = sprintf(buf, NIPQUAD_FMT, NIPQUAD(addr));
+ len = sprintf(buf, "%pI4", &addr);
break;
}
default:
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 15a00e8b7122..3e08c430ff29 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1675,10 +1675,11 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
} else
c3cn->saddr.sin_addr.s_addr = sipv4;
- c3cn_conn_debug("c3cn 0x%p, %u.%u.%u.%u,%u-%u.%u.%u.%u,%u SYN_SENT.\n",
- c3cn, NIPQUAD(c3cn->saddr.sin_addr.s_addr),
+ c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n",
+ c3cn,
+ &c3cn->saddr.sin_addr.s_addr,
ntohs(c3cn->saddr.sin_port),
- NIPQUAD(c3cn->daddr.sin_addr.s_addr),
+ &c3cn->daddr.sin_addr.s_addr,
ntohs(c3cn->daddr.sin_port));
c3cn_set_state(c3cn, C3CN_STATE_CONNECTING);
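The bnx2i and cxgb3i hunks above replace the removed NIPQUAD()/NIPQUAD_FMT pair with the %pI4 printk extension, which takes a pointer to the big-endian address:

	/* old: sprintf(buf, NIPQUAD_FMT "\n", NIPQUAD(addr)); */
	__be32 addr = c3cn->daddr.sin_addr.s_addr;

	len = sprintf(buf, "%pI4,%u\n", &addr, ntohs(c3cn->daddr.sin_port));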
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 1fe3b0f1f3c9..9c38539557fc 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -461,10 +461,8 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
skb = skb_peek(&c3cn->receive_queue);
}
read_unlock(&c3cn->callback_lock);
- if (c3cn) {
- c3cn->copied_seq += read;
- cxgb3i_c3cn_rx_credits(c3cn, read);
- }
+ c3cn->copied_seq += read;
+ cxgb3i_c3cn_rx_credits(c3cn, read);
conn->rxdata_octets += read;
if (err) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4f0d0138f48b..bc9e94f5915e 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -717,6 +717,8 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
{"IBM", "2145" },
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
+ {"NETAPP", "LUN"},
+ {"AIX", "NVDISK"},
{NULL, NULL}
};
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index c7076ce25e21..3c5abf7cd762 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1509,7 +1509,7 @@ static int option_setup(char *str)
char *cur = str;
int i = 1;
- while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) {
+ while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL)
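The eata change is a one-character bounds fix. Assuming the usual `int ints[MAX_INT_PARAM]` declaration in option_setup() (it sits above this hunk), valid indices are 0..MAX_INT_PARAM-1, so the old `i <= MAX_INT_PARAM` guard let `ints[i++]` write one slot past the end on a maximal option string:

	int ints[MAX_INT_PARAM];	/* assumed declaration */
	int i = 1;

	while (cur && isdigit(*cur) && i < MAX_INT_PARAM)	/* was: <= */
		ints[i++] = simple_strtoul(cur, NULL, 0);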
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index a680e18b5f3b..e2bc779f86c1 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1449,9 +1449,6 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
if (offset > 15)
goto do_reject;
- if (esp->flags & ESP_FLAG_DISABLE_SYNC)
- offset = 0;
-
if (offset) {
int one_clock;
@@ -2405,12 +2402,6 @@ static int esp_slave_configure(struct scsi_device *dev)
struct esp_target_data *tp = &esp->target[dev->id];
int goal_tags, queue_depth;
- if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
- /* Bypass async domain validation */
- dev->ppr = 0;
- dev->sdtr = 0;
- }
-
goal_tags = 0;
if (dev->tagged_supported) {
@@ -2660,7 +2651,10 @@ static void esp_set_offset(struct scsi_target *target, int offset)
struct esp *esp = shost_priv(host);
struct esp_target_data *tp = &esp->target[target->id];
- tp->nego_goal_offset = offset;
+ if (esp->flags & ESP_FLAG_DISABLE_SYNC)
+ tp->nego_goal_offset = 0;
+ else
+ tp->nego_goal_offset = offset;
tp->flags |= ESP_TGT_CHECK_NEGO;
}
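The esp hunks relocate the ESP_FLAG_DISABLE_SYNC handling: rather than zeroing an already-negotiated offset in the SDTR parser and bypassing domain validation in slave_configure, the driver now simply never asks for synchronous transfer, clamping the goal at the point where it is set (this mirrors the patched esp_set_offset() above):

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;	/* never request a sync offset */
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;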
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index bb208a6091e7..3966c71d0095 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -36,7 +36,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.0.0.1121"
+#define DRV_VERSION "1.4.0.98"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fe1b1031f7ab..507e26c1c29f 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -620,6 +620,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
shost_printk(KERN_INFO, fnic->lport->host,
"firmware supports FIP\n");
+ /* enable directed and multicast */
+ vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
} else {
@@ -698,6 +700,8 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
goto err_out_remove_scsi_host;
}
+ fc_lport_init_stats(lp);
+
fc_lport_config(lp);
if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index d62b9061bf12..7c9ccbd4134b 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -94,7 +94,7 @@ enum vnic_devcmd_cmd {
CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
/* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
- CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
+ CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 7),
/* hang detection notification */
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 9e8fce0f0c1b..ba3c94c9c25f 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -140,40 +140,40 @@
#include "gdth.h"
static void gdth_delay(int milliseconds);
-static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs);
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
static irqreturn_t gdth_interrupt(int irq, void *dev_id);
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
int gdth_from_wait, int* pIndex);
-static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
Scsi_Cmnd *scp);
static int gdth_async_event(gdth_ha_str *ha);
static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority);
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b);
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
- ushort idx, gdth_evt_data *evt);
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
+ u16 idx, gdth_evt_data *evt);
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
-static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
+static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
gdth_evt_str *estr);
static void gdth_clear_events(void);
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
- char *buffer, ushort count);
+ char *buffer, u16 count);
static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
static void gdth_enable_int(gdth_ha_str *ha);
static int gdth_test_busy(gdth_ha_str *ha);
static int gdth_get_cmd_index(gdth_ha_str *ha);
static void gdth_release_event(gdth_ha_str *ha);
-static int gdth_wait(gdth_ha_str *ha, int index,ulong32 time);
-static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
- ulong32 p1, ulong64 p2,ulong64 p3);
+static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+ u32 p1, u64 p2,u64 p3);
static int gdth_search_drives(gdth_ha_str *ha);
-static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive);
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
static const char *gdth_ctr_name(gdth_ha_str *ha);
@@ -189,7 +189,7 @@ static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
static void gdth_scsi_done(struct scsi_cmnd *scp);
#ifdef DEBUG_GDTH
-static unchar DebugState = DEBUG_GDTH;
+static u8 DebugState = DEBUG_GDTH;
#ifdef __SERIAL__
#define MAX_SERBUF 160
@@ -270,30 +270,30 @@ static int ser_printk(const char *fmt, ...)
#endif
#ifdef GDTH_STATISTICS
-static ulong32 max_rq=0, max_index=0, max_sg=0;
+static u32 max_rq=0, max_index=0, max_sg=0;
#ifdef INT_COAL
-static ulong32 max_int_coal=0;
+static u32 max_int_coal=0;
#endif
-static ulong32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
+static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
static struct timer_list gdth_timer;
#endif
-#define PTR2USHORT(a) (ushort)(ulong)(a)
+#define PTR2USHORT(a) (u16)(unsigned long)(a)
#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
#ifdef CONFIG_ISA
-static unchar gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
+static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */
#endif
#if defined(CONFIG_EISA) || defined(CONFIG_ISA)
-static unchar gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
+static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */
#endif
-static unchar gdth_polling; /* polling if TRUE */
+static u8 gdth_polling; /* polling if TRUE */
static int gdth_ctr_count = 0; /* controller count */
static LIST_HEAD(gdth_instances); /* controller list */
-static unchar gdth_write_through = FALSE; /* write through */
+static u8 gdth_write_through = FALSE; /* write through */
static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
static int elastidx;
static int eoldidx;
@@ -303,7 +303,7 @@ static int major;
#define DOU 2 /* OUT data direction */
#define DNO DIN /* no data transfer */
#define DUN DIN /* unknown data direction */
-static unchar gdth_direction_tab[0x100] = {
+static u8 gdth_direction_tab[0x100] = {
DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
@@ -390,7 +390,7 @@ static gdth_ha_str *gdth_find_ha(int hanum)
static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
{
struct gdth_cmndinfo *priv = NULL;
- ulong flags;
+ unsigned long flags;
int i;
spin_lock_irqsave(&ha->smp_lock, flags);
@@ -493,7 +493,7 @@ int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
return rval;
}
-static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs)
+static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
{
*cyls = size /HEADS/SECS;
if (*cyls <= MAXCYLS) {
@@ -514,9 +514,9 @@ static void gdth_eval_mapping(ulong32 size, ulong32 *cyls, int *heads, int *secs
/* controller search and initialization functions */
#ifdef CONFIG_EISA
-static int __init gdth_search_eisa(ushort eisa_adr)
+static int __init gdth_search_eisa(u16 eisa_adr)
{
- ulong32 id;
+ u32 id;
TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr));
id = inl(eisa_adr+ID0REG);
@@ -533,13 +533,13 @@ static int __init gdth_search_eisa(ushort eisa_adr)
#endif /* CONFIG_EISA */
#ifdef CONFIG_ISA
-static int __init gdth_search_isa(ulong32 bios_adr)
+static int __init gdth_search_isa(u32 bios_adr)
{
void __iomem *addr;
- ulong32 id;
+ u32 id;
TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr));
- if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(ulong32))) != NULL) {
+ if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) {
id = readl(addr);
iounmap(addr);
if (id == GDT2_ID) /* GDT2000 */
@@ -551,7 +551,7 @@ static int __init gdth_search_isa(ulong32 bios_adr)
#ifdef CONFIG_PCI
-static bool gdth_search_vortex(ushort device)
+static bool gdth_search_vortex(u16 device)
{
if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
return true;
@@ -603,9 +603,9 @@ static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- ushort vendor = pdev->vendor;
- ushort device = pdev->device;
- ulong base0, base1, base2;
+ u16 vendor = pdev->vendor;
+ u16 device = pdev->device;
+ unsigned long base0, base1, base2;
int rc;
gdth_pci_str gdth_pcistr;
gdth_ha_str *ha = NULL;
@@ -658,10 +658,10 @@ static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
#endif /* CONFIG_PCI */
#ifdef CONFIG_EISA
-static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
+static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha)
{
- ulong32 retries,id;
- unchar prot_ver,eisacf,i,irq_found;
+ u32 retries,id;
+ u8 prot_ver,eisacf,i,irq_found;
TRACE(("gdth_init_eisa() adr. %x\n",eisa_adr));
@@ -688,7 +688,7 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
return 0;
}
ha->bmic = eisa_adr;
- ha->brd_phys = (ulong32)eisa_adr >> 12;
+ ha->brd_phys = (u32)eisa_adr >> 12;
outl(0,eisa_adr+MAILBOXREG);
outl(0,eisa_adr+MAILBOXREG+4);
@@ -752,12 +752,12 @@ static int __init gdth_init_eisa(ushort eisa_adr,gdth_ha_str *ha)
#endif /* CONFIG_EISA */
#ifdef CONFIG_ISA
-static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
+static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
{
register gdt2_dpram_str __iomem *dp2_ptr;
int i;
- unchar irq_drq,prot_ver;
- ulong32 retries;
+ u8 irq_drq,prot_ver;
+ u32 retries;
TRACE(("gdth_init_isa() bios adr. %x\n",bios_adr));
@@ -812,7 +812,7 @@ static int __init gdth_init_isa(ulong32 bios_adr,gdth_ha_str *ha)
}
gdth_delay(1);
}
- prot_ver = (unchar)readl(&dp2_ptr->u.ic.S_Info[0]);
+ prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]);
writeb(0, &dp2_ptr->u.ic.Status);
writeb(0xff, &dp2_ptr->io.irqdel);
if (prot_ver != PROTOCOL_VERSION) {
@@ -859,9 +859,9 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
register gdt6_dpram_str __iomem *dp6_ptr;
register gdt6c_dpram_str __iomem *dp6c_ptr;
register gdt6m_dpram_str __iomem *dp6m_ptr;
- ulong32 retries;
- unchar prot_ver;
- ushort command;
+ u32 retries;
+ u8 prot_ver;
+ u16 command;
int i, found = FALSE;
TRACE(("gdth_init_pci()\n"));
@@ -871,7 +871,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
else
ha->oem_id = OEM_ID_ICP;
ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
- ha->stype = (ulong32)pdev->device;
+ ha->stype = (u32)pdev->device;
ha->irq = pdev->irq;
ha->pdev = pdev;
@@ -891,7 +891,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
found = FALSE;
for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(ushort));
+ ha->brd = ioremap(i, sizeof(u16));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
@@ -947,7 +947,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
}
gdth_delay(1);
}
- prot_ver = (unchar)readl(&dp6_ptr->u.ic.S_Info[0]);
+ prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
writeb(0, &dp6_ptr->u.ic.S_Status);
writeb(0xff, &dp6_ptr->io.irqdel);
if (prot_ver != PROTOCOL_VERSION) {
@@ -1000,7 +1000,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
found = FALSE;
for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(ushort));
+ ha->brd = ioremap(i, sizeof(u16));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
@@ -1059,7 +1059,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
}
gdth_delay(1);
}
- prot_ver = (unchar)readl(&dp6c_ptr->u.ic.S_Info[0]);
+ prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
writeb(0, &dp6c_ptr->u.ic.Status);
if (prot_ver != PROTOCOL_VERSION) {
printk("GDT-PCI: Illegal protocol version\n");
@@ -1128,7 +1128,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
found = FALSE;
for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(ushort));
+ ha->brd = ioremap(i, sizeof(u16));
if (ha->brd == NULL) {
printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
return 0;
@@ -1180,7 +1180,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
}
gdth_delay(1);
}
- prot_ver = (unchar)readl(&dp6m_ptr->u.ic.S_Info[0]);
+ prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
writeb(0, &dp6m_ptr->u.ic.S_Status);
if (prot_ver != PROTOCOL_VERSION) {
printk("GDT-PCI: Illegal protocol version\n");
@@ -1223,7 +1223,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
}
gdth_delay(1);
}
- prot_ver = (unchar)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
+ prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
writeb(0, &dp6m_ptr->u.ic.S_Status);
if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */
ha->dma64_support = 0;
@@ -1239,7 +1239,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
static void __devinit gdth_enable_int(gdth_ha_str *ha)
{
- ulong flags;
+ unsigned long flags;
gdt2_dpram_str __iomem *dp2_ptr;
gdt6_dpram_str __iomem *dp6_ptr;
gdt6m_dpram_str __iomem *dp6m_ptr;
@@ -1274,14 +1274,14 @@ static void __devinit gdth_enable_int(gdth_ha_str *ha)
}
/* return IStatus if interrupt was from this card else 0 */
-static unchar gdth_get_status(gdth_ha_str *ha)
+static u8 gdth_get_status(gdth_ha_str *ha)
{
- unchar IStatus = 0;
+ u8 IStatus = 0;
TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
if (ha->type == GDT_EISA)
- IStatus = inb((ushort)ha->bmic + EDOORREG);
+ IStatus = inb((u16)ha->bmic + EDOORREG);
else if (ha->type == GDT_ISA)
IStatus =
readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
@@ -1329,7 +1329,7 @@ static int gdth_get_cmd_index(gdth_ha_str *ha)
if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
ha->cmd_tab[i].service = ha->pccb->Service;
- ha->pccb->CommandIndex = (ulong32)i+2;
+ ha->pccb->CommandIndex = (u32)i+2;
return (i+2);
}
}
@@ -1362,7 +1362,7 @@ static void gdth_copy_command(gdth_ha_str *ha)
register gdt6c_dpram_str __iomem *dp6c_ptr;
gdt6_dpram_str __iomem *dp6_ptr;
gdt2_dpram_str __iomem *dp2_ptr;
- ushort cp_count,dp_offset,cmd_no;
+ u16 cp_count,dp_offset,cmd_no;
TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
@@ -1386,28 +1386,28 @@ static void gdth_copy_command(gdth_ha_str *ha)
dp2_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp2_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((ushort)cmd_ptr->Service,
+ writew((u16)cmd_ptr->Service,
&dp2_ptr->u.ic.comm_queue[cmd_no].serv_id);
memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
} else if (ha->type == GDT_PCI) {
dp6_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp6_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((ushort)cmd_ptr->Service,
+ writew((u16)cmd_ptr->Service,
&dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
} else if (ha->type == GDT_PCINEW) {
dp6c_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((ushort)cmd_ptr->Service,
+ writew((u16)cmd_ptr->Service,
&dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
} else if (ha->type == GDT_PCIMPR) {
dp6m_ptr = ha->brd;
writew(dp_offset + DPMEM_COMMAND_OFFSET,
&dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((ushort)cmd_ptr->Service,
+ writew((u16)cmd_ptr->Service,
&dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
}
@@ -1420,14 +1420,14 @@ static void gdth_release_event(gdth_ha_str *ha)
#ifdef GDTH_STATISTICS
{
- ulong32 i,j;
+ u32 i,j;
for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
++i;
}
if (max_index < i) {
max_index = i;
- TRACE3(("GDT: max_index = %d\n",(ushort)i));
+ TRACE3(("GDT: max_index = %d\n",(u16)i));
}
}
#endif
@@ -1450,7 +1450,7 @@ static void gdth_release_event(gdth_ha_str *ha)
}
}
-static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
+static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
{
int answer_found = FALSE;
int wait_index = 0;
@@ -1476,8 +1476,8 @@ static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time)
}
-static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
- ulong32 p1, ulong64 p2, ulong64 p3)
+static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
+ u32 p1, u64 p2, u64 p3)
{
register gdth_cmd_str *cmd_ptr;
int retries,index;
@@ -1501,35 +1501,35 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
if (service == CACHESERVICE) {
if (opcode == GDT_IOCTL) {
cmd_ptr->u.ioctl.subfunc = p1;
- cmd_ptr->u.ioctl.channel = (ulong32)p2;
- cmd_ptr->u.ioctl.param_size = (ushort)p3;
+ cmd_ptr->u.ioctl.channel = (u32)p2;
+ cmd_ptr->u.ioctl.param_size = (u16)p3;
cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
} else {
if (ha->cache_feat & GDT_64BIT) {
- cmd_ptr->u.cache64.DeviceNo = (ushort)p1;
+ cmd_ptr->u.cache64.DeviceNo = (u16)p1;
cmd_ptr->u.cache64.BlockNo = p2;
} else {
- cmd_ptr->u.cache.DeviceNo = (ushort)p1;
- cmd_ptr->u.cache.BlockNo = (ulong32)p2;
+ cmd_ptr->u.cache.DeviceNo = (u16)p1;
+ cmd_ptr->u.cache.BlockNo = (u32)p2;
}
}
} else if (service == SCSIRAWSERVICE) {
if (ha->raw_feat & GDT_64BIT) {
cmd_ptr->u.raw64.direction = p1;
- cmd_ptr->u.raw64.bus = (unchar)p2;
- cmd_ptr->u.raw64.target = (unchar)p3;
- cmd_ptr->u.raw64.lun = (unchar)(p3 >> 8);
+ cmd_ptr->u.raw64.bus = (u8)p2;
+ cmd_ptr->u.raw64.target = (u8)p3;
+ cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
} else {
cmd_ptr->u.raw.direction = p1;
- cmd_ptr->u.raw.bus = (unchar)p2;
- cmd_ptr->u.raw.target = (unchar)p3;
- cmd_ptr->u.raw.lun = (unchar)(p3 >> 8);
+ cmd_ptr->u.raw.bus = (u8)p2;
+ cmd_ptr->u.raw.target = (u8)p3;
+ cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
}
} else if (service == SCREENSERVICE) {
if (opcode == GDT_REALTIME) {
- *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1;
- *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2;
- *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3;
+ *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
+ *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
+ *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
}
}
ha->cmd_len = sizeof(gdth_cmd_str);
@@ -1555,9 +1555,9 @@ static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode,
static int __devinit gdth_search_drives(gdth_ha_str *ha)
{
- ushort cdev_cnt, i;
+ u16 cdev_cnt, i;
int ok;
- ulong32 bus_no, drv_cnt, drv_no, j;
+ u32 bus_no, drv_cnt, drv_no, j;
gdth_getch_str *chn;
gdth_drlist_str *drl;
gdth_iochan_str *ioc;
@@ -1570,8 +1570,8 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
#endif
#ifdef GDTH_RTC
- unchar rtc[12];
- ulong flags;
+ u8 rtc[12];
+ unsigned long flags;
#endif
TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
@@ -1584,7 +1584,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
if (ok)
ha->screen_feat = GDT_64BIT;
}
- if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+ if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
if (!ok) {
printk("GDT-HA %d: Initialization error screen service (code %d)\n",
@@ -1609,11 +1609,11 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
rtc[j] = CMOS_READ(j);
} while (rtc[0] != CMOS_READ(0));
spin_unlock_irqrestore(&rtc_lock, flags);
- TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
- *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
+ TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0],
+ *(u32 *)&rtc[4], *(u32 *)&rtc[8]));
/* 3. send to controller firmware */
- gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(ulong32 *)&rtc[0],
- *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]);
+ gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0],
+ *(u32 *)&rtc[4], *(u32 *)&rtc[8]);
#endif
/* unfreeze all IOs */
@@ -1627,7 +1627,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
if (ok)
ha->cache_feat = GDT_64BIT;
}
- if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+ if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
if (!ok) {
printk("GDT-HA %d: Initialization error cache service (code %d)\n",
@@ -1635,7 +1635,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
return 0;
}
TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
- cdev_cnt = (ushort)ha->info;
+ cdev_cnt = (u16)ha->info;
ha->fw_vers = ha->service;
#ifdef INT_COAL
@@ -1644,7 +1644,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
pmod = (gdth_perf_modes *)ha->pscratch;
pmod->version = 1;
pmod->st_mode = 1; /* enable one status buffer */
- *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
+ *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys;
pmod->st_buff_indx1 = COALINDEX;
pmod->st_buff_addr2 = 0;
pmod->st_buff_u_addr2 = 0;
@@ -1705,7 +1705,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
else
ha->bus_id[bus_no] = 0xff;
}
- ha->bus_cnt = (unchar)bus_no;
+ ha->bus_cnt = (u8)bus_no;
}
TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
@@ -1789,12 +1789,12 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
/* logical drives */
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
- INVALID_CHANNEL,sizeof(ulong32))) {
- drv_cnt = *(ulong32 *)ha->pscratch;
+ INVALID_CHANNEL,sizeof(u32))) {
+ drv_cnt = *(u32 *)ha->pscratch;
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
- INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) {
+ INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
for (j = 0; j < drv_cnt; ++j) {
- drv_no = ((ulong32 *)ha->pscratch)[j];
+ drv_no = ((u32 *)ha->pscratch)[j];
if (drv_no < MAX_LDRIVES) {
ha->hdr[drv_no].is_logdrv = TRUE;
TRACE2(("Drive %d is log. drive\n",drv_no));
@@ -1838,7 +1838,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
if (ok)
ha->raw_feat = GDT_64BIT;
}
- if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC))
+ if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
if (!ok) {
printk("GDT-HA %d: Initialization error raw service (code %d)\n",
@@ -1854,7 +1854,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
ha->info));
- ha->raw_feat |= (ushort)ha->info;
+ ha->raw_feat |= (u16)ha->info;
}
}
@@ -1865,7 +1865,7 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
ha->info));
- ha->cache_feat |= (ushort)ha->info;
+ ha->cache_feat |= (u16)ha->info;
}
}
@@ -1923,9 +1923,9 @@ static int __devinit gdth_search_drives(gdth_ha_str *ha)
return 1;
}
-static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
+static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
{
- ulong32 drv_cyls;
+ u32 drv_cyls;
int drv_hds, drv_secs;
TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
@@ -1944,17 +1944,17 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
} else {
drv_hds = ha->info2 & 0xff;
drv_secs = (ha->info2 >> 8) & 0xff;
- drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs;
+ drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
}
- ha->hdr[hdrive].heads = (unchar)drv_hds;
- ha->hdr[hdrive].secs = (unchar)drv_secs;
+ ha->hdr[hdrive].heads = (u8)drv_hds;
+ ha->hdr[hdrive].secs = (u8)drv_secs;
/* round size */
ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
if (ha->cache_feat & GDT_64BIT) {
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
&& ha->info2 != 0) {
- ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info;
+ ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
}
}
TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
@@ -1964,7 +1964,7 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
hdrive,ha->info));
- ha->hdr[hdrive].devtype = (ushort)ha->info;
+ ha->hdr[hdrive].devtype = (u16)ha->info;
}
/* cluster info */
@@ -1972,14 +1972,14 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
hdrive,ha->info));
if (!shared_access)
- ha->hdr[hdrive].cluster_type = (unchar)ha->info;
+ ha->hdr[hdrive].cluster_type = (u8)ha->info;
}
/* R/W attributes */
if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
hdrive,ha->info));
- ha->hdr[hdrive].rw_attribs = (unchar)ha->info;
+ ha->hdr[hdrive].rw_attribs = (u8)ha->info;
}
return 1;
@@ -1988,12 +1988,12 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive)
/* command queueing/sending functions */
-static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
+static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
{
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
register Scsi_Cmnd *pscp;
register Scsi_Cmnd *nscp;
- ulong flags;
+ unsigned long flags;
TRACE(("gdth_putq() priority %d\n",priority));
spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2023,7 +2023,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
++flags;
if (max_rq < flags) {
max_rq = flags;
- TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq));
+ TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
}
#endif
}
@@ -2032,9 +2032,9 @@ static void gdth_next(gdth_ha_str *ha)
{
register Scsi_Cmnd *pscp;
register Scsi_Cmnd *nscp;
- unchar b, t, l, firsttime;
- unchar this_cmd, next_cmd;
- ulong flags = 0;
+ u8 b, t, l, firsttime;
+ u8 this_cmd, next_cmd;
+ unsigned long flags = 0;
int cmd_index;
TRACE(("gdth_next() hanum %d\n", ha->hanum));
@@ -2282,20 +2282,20 @@ static void gdth_next(gdth_ha_str *ha)
* buffers, kmap_atomic() as needed.
*/
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
- char *buffer, ushort count)
+ char *buffer, u16 count)
{
- ushort cpcount,i, max_sg = scsi_sg_count(scp);
- ushort cpsum,cpnow;
+ u16 cpcount,i, max_sg = scsi_sg_count(scp);
+ u16 cpsum,cpnow;
struct scatterlist *sl;
char *address;
- cpcount = min_t(ushort, count, scsi_bufflen(scp));
+ cpcount = min_t(u16, count, scsi_bufflen(scp));
if (cpcount) {
cpsum=0;
scsi_for_each_sg(scp, sl, max_sg, i) {
unsigned long flags;
- cpnow = (ushort)sl->length;
+ cpnow = (u16)sl->length;
TRACE(("copy_internal() now %d sum %d count %d %d\n",
cpnow, cpsum, cpcount, scsi_bufflen(scp)));
if (cpsum+cpnow > cpcount)
@@ -2325,7 +2325,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
{
- unchar t;
+ u8 t;
gdth_inq_data inq;
gdth_rdcap_data rdc;
gdth_sense_data sd;
@@ -2389,7 +2389,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
case READ_CAPACITY:
TRACE2(("Read capacity hdrive %d\n",t));
- if (ha->hdr[t].size > (ulong64)0xffffffff)
+ if (ha->hdr[t].size > (u64)0xffffffff)
rdc.last_block_no = 0xffffffff;
else
rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
@@ -2425,12 +2425,12 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
return 0;
}
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
+static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
{
register gdth_cmd_str *cmdp;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- ulong32 cnt, blockcnt;
- ulong64 no, blockno;
+ u32 cnt, blockcnt;
+ u64 no, blockno;
int i, cmd_index, read_write, sgcnt, mode64;
cmdp = ha->pccb;
@@ -2498,17 +2498,17 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
if (read_write) {
if (scp->cmd_len == 16) {
- memcpy(&no, &scp->cmnd[2], sizeof(ulong64));
+ memcpy(&no, &scp->cmnd[2], sizeof(u64));
blockno = be64_to_cpu(no);
- memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32));
+ memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
blockcnt = be32_to_cpu(cnt);
} else if (scp->cmd_len == 10) {
- memcpy(&no, &scp->cmnd[2], sizeof(ulong32));
+ memcpy(&no, &scp->cmnd[2], sizeof(u32));
blockno = be32_to_cpu(no);
- memcpy(&cnt, &scp->cmnd[7], sizeof(ushort));
+ memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
blockcnt = be16_to_cpu(cnt);
} else {
- memcpy(&no, &scp->cmnd[0], sizeof(ulong32));
+ memcpy(&no, &scp->cmnd[0], sizeof(u32));
blockno = be32_to_cpu(no) & 0x001fffffUL;
blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
}
@@ -2516,7 +2516,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
cmdp->u.cache64.BlockNo = blockno;
cmdp->u.cache64.BlockCnt = blockcnt;
} else {
- cmdp->u.cache.BlockNo = (ulong32)blockno;
+ cmdp->u.cache.BlockNo = (u32)blockno;
cmdp->u.cache.BlockCnt = blockcnt;
}
@@ -2528,12 +2528,12 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
if (mode64) {
struct scatterlist *sl;
- cmdp->u.cache64.DestAddr= (ulong64)-1;
+ cmdp->u.cache64.DestAddr= (u64)-1;
cmdp->u.cache64.sg_canz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
- if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
+ if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff)
ha->dma64_cnt++;
else
ha->dma32_cnt++;
@@ -2555,8 +2555,8 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
}
#ifdef GDTH_STATISTICS
- if (max_sg < (ulong32)sgcnt) {
- max_sg = (ulong32)sgcnt;
+ if (max_sg < (u32)sgcnt) {
+ max_sg = (u32)sgcnt;
TRACE3(("GDT: max_sg = %d\n",max_sg));
}
#endif
@@ -2572,7 +2572,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
- (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
+ (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
} else {
TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
@@ -2581,7 +2581,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
- (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
+ (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
}
if (ha->cmd_len & 3)
ha->cmd_len += (4 - (ha->cmd_len & 3));
@@ -2600,15 +2600,15 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive)
return cmd_index;
}
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
+static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
{
register gdth_cmd_str *cmdp;
- ushort i;
+ u16 i;
dma_addr_t sense_paddr;
int cmd_index, sgcnt, mode64;
- unchar t,l;
+ u8 t,l;
struct page *page;
- ulong offset;
+ unsigned long offset;
struct gdth_cmndinfo *cmndinfo;
t = scp->device->id;
@@ -2654,7 +2654,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
} else {
page = virt_to_page(scp->sense_buffer);
- offset = (ulong)scp->sense_buffer & ~PAGE_MASK;
+ offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK;
sense_paddr = pci_map_page(ha->pdev,page,offset,
16,PCI_DMA_FROMDEVICE);
@@ -2703,12 +2703,12 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
if (mode64) {
struct scatterlist *sl;
- cmdp->u.raw64.sdata = (ulong64)-1;
+ cmdp->u.raw64.sdata = (u64)-1;
cmdp->u.raw64.sg_ranz = sgcnt;
scsi_for_each_sg(scp, sl, sgcnt, i) {
cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
- if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
+ if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff)
ha->dma64_cnt++;
else
ha->dma32_cnt++;
@@ -2744,7 +2744,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
cmdp->u.raw64.sg_lst[0].sg_len));
/* evaluate command size */
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
- (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
+ (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
} else {
TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
@@ -2752,7 +2752,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
cmdp->u.raw.sg_lst[0].sg_len));
/* evaluate command size */
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
- (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
+ (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
}
}
/* check space */
@@ -2802,7 +2802,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
if (cmdp->OpCode == GDT_IOCTL) {
TRACE2(("IOCTL\n"));
ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64);
+ GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
} else if (cmdp->Service == CACHESERVICE) {
TRACE2(("cache command %d\n",cmdp->OpCode));
if (ha->cache_feat & GDT_64BIT)
@@ -2840,8 +2840,8 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
/* Controller event handling functions */
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
- ushort idx, gdth_evt_data *evt)
+static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
+ u16 idx, gdth_evt_data *evt)
{
gdth_evt_str *e;
struct timeval tv;
@@ -2890,7 +2890,7 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
{
gdth_evt_str *e;
int eindex;
- ulong flags;
+ unsigned long flags;
TRACE2(("gdth_read_event() handle %d\n", handle));
spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2919,12 +2919,12 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
}
static void gdth_readapp_event(gdth_ha_str *ha,
- unchar application, gdth_evt_str *estr)
+ u8 application, gdth_evt_str *estr)
{
gdth_evt_str *e;
int eindex;
- ulong flags;
- unchar found = FALSE;
+ unsigned long flags;
+ u8 found = FALSE;
TRACE2(("gdth_readapp_event() app. %d\n", application));
spin_lock_irqsave(&ha->smp_lock, flags);
@@ -2969,9 +2969,9 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
gdt2_dpram_str __iomem *dp2_ptr;
Scsi_Cmnd *scp;
int rval, i;
- unchar IStatus;
- ushort Service;
- ulong flags = 0;
+ u8 IStatus;
+ u16 Service;
+ unsigned long flags = 0;
#ifdef INT_COAL
int coalesced = FALSE;
int next = FALSE;
@@ -3018,7 +3018,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
if (coalesced) {
/* For coalesced requests all status
information is found in the status buffer */
- IStatus = (unchar)(pcs->status & 0xff);
+ IStatus = (u8)(pcs->status & 0xff);
}
#endif
@@ -3197,7 +3197,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
++act_int_coal;
if (act_int_coal > max_int_coal) {
max_int_coal = act_int_coal;
- printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal);
+ printk("GDT: max_int_coal = %d\n",(u16)max_int_coal);
}
#endif
/* see if there is another status */
@@ -3225,12 +3225,12 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id)
return __gdth_interrupt(ha, false, NULL);
}
-static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
+static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
Scsi_Cmnd *scp)
{
gdth_msg_str *msg;
gdth_cmd_str *cmdp;
- unchar b, t;
+ u8 b, t;
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
cmdp = ha->pccb;
@@ -3263,7 +3263,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
ha->cmd_offs_dpmem = 0;
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(ulong64);
+ + sizeof(u64);
ha->cmd_cnt = 0;
gdth_copy_command(ha);
gdth_release_event(ha);
@@ -3297,7 +3297,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
ha->cmd_offs_dpmem = 0;
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(ulong64);
+ + sizeof(u64);
ha->cmd_cnt = 0;
gdth_copy_command(ha);
gdth_release_event(ha);
@@ -3335,7 +3335,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
cmndinfo->OpCode));
/* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
if (cmndinfo->OpCode == GDT_CLUST_INFO) {
- ha->hdr[t].cluster_type = (unchar)ha->info;
+ ha->hdr[t].cluster_type = (u8)ha->info;
if (!(ha->hdr[t].cluster_type &
CLUSTER_MOUNTED)) {
/* NOT MOUNTED -> MOUNT */
@@ -3397,7 +3397,7 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
}
memset((char*)scp->sense_buffer,0,16);
- if (ha->status == (ushort)S_CACHE_RESERV) {
+ if (ha->status == (u16)S_CACHE_RESERV) {
scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
} else {
scp->sense_buffer[0] = 0x70;
@@ -3614,16 +3614,16 @@ static int gdth_async_event(gdth_ha_str *ha)
cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
ha->cmd_offs_dpmem = 0;
ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(ulong64);
+ + sizeof(u64);
ha->cmd_cnt = 0;
gdth_copy_command(ha);
if (ha->type == GDT_EISA)
- printk("[EISA slot %d] ",(ushort)ha->brd_phys);
+ printk("[EISA slot %d] ",(u16)ha->brd_phys);
else if (ha->type == GDT_ISA)
- printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys);
+ printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys);
else
- printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8),
- (ushort)((ha->brd_phys>>3)&0x1f));
+ printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
+ (u16)((ha->brd_phys>>3)&0x1f));
gdth_release_event(ha);
}
@@ -3640,7 +3640,7 @@ static int gdth_async_event(gdth_ha_str *ha)
ha->dvr.eu.async.service = ha->service;
ha->dvr.eu.async.status = ha->status;
ha->dvr.eu.async.info = ha->info;
- *(ulong32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
+ *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
}
gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
gdth_log_event( &ha->dvr, NULL );
@@ -3648,8 +3648,8 @@ static int gdth_async_event(gdth_ha_str *ha)
/* new host drive from expand? */
if (ha->service == CACHESERVICE && ha->status == 56) {
TRACE2(("gdth_async_event(): new host drive %d created\n",
- (ushort)ha->info));
- /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */
+ (u16)ha->info));
+ /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
}
}
return 1;
@@ -3680,13 +3680,13 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
for (j=0,i=1; i < f[0]; i+=2) {
switch (f[i+1]) {
case 4:
- stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]];
+ stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
break;
case 2:
- stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]];
+ stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
break;
case 1:
- stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]];
+ stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
break;
default:
break;
@@ -3712,14 +3712,14 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
}
#ifdef GDTH_STATISTICS
-static unchar gdth_timer_running;
+static u8 gdth_timer_running;
-static void gdth_timeout(ulong data)
+static void gdth_timeout(unsigned long data)
{
- ulong32 i;
+ u32 i;
Scsi_Cmnd *nscp;
gdth_ha_str *ha;
- ulong flags;
+ unsigned long flags;
if(unlikely(list_empty(&gdth_instances))) {
gdth_timer_running = 0;
@@ -3891,8 +3891,8 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- unchar b, t;
- ulong flags;
+ u8 b, t;
+ unsigned long flags;
enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
@@ -3924,9 +3924,9 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
int i;
- ulong flags;
+ unsigned long flags;
Scsi_Cmnd *cmnd;
- unchar b;
+ u8 b;
TRACE2(("gdth_eh_bus_reset()\n"));
@@ -3974,7 +3974,7 @@ static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
{
- unchar b, t;
+ u8 b, t;
gdth_ha_str *ha = shost_priv(sdev->host);
struct scsi_device *sd;
unsigned capacity;
@@ -4062,7 +4062,7 @@ static int ioc_event(void __user *arg)
{
gdth_ioctl_event evt;
gdth_ha_str *ha;
- ulong flags;
+ unsigned long flags;
if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
return -EFAULT;
@@ -4098,8 +4098,8 @@ static int ioc_event(void __user *arg)
static int ioc_lockdrv(void __user *arg)
{
gdth_ioctl_lockdrv ldrv;
- unchar i, j;
- ulong flags;
+ u8 i, j;
+ unsigned long flags;
gdth_ha_str *ha;
if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
@@ -4165,7 +4165,7 @@ static int ioc_general(void __user *arg, char *cmnd)
{
gdth_ioctl_general gen;
char *buf = NULL;
- ulong64 paddr;
+ u64 paddr;
gdth_ha_str *ha;
int rval;
@@ -4194,7 +4194,7 @@ static int ioc_general(void __user *arg, char *cmnd)
gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
/* addresses */
if (ha->cache_feat & SCATTER_GATHER) {
- gen.command.u.cache64.DestAddr = (ulong64)-1;
+ gen.command.u.cache64.DestAddr = (u64)-1;
gen.command.u.cache64.sg_canz = 1;
gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
@@ -4207,7 +4207,7 @@ static int ioc_general(void __user *arg, char *cmnd)
if (ha->cache_feat & SCATTER_GATHER) {
gen.command.u.cache.DestAddr = 0xffffffff;
gen.command.u.cache.sg_canz = 1;
- gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr;
+ gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
gen.command.u.cache.sg_lst[1].sg_len = 0;
} else {
@@ -4230,7 +4230,7 @@ static int ioc_general(void __user *arg, char *cmnd)
gen.command.u.raw64.direction = gen.command.u.raw.direction;
/* addresses */
if (ha->raw_feat & SCATTER_GATHER) {
- gen.command.u.raw64.sdata = (ulong64)-1;
+ gen.command.u.raw64.sdata = (u64)-1;
gen.command.u.raw64.sg_ranz = 1;
gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
@@ -4244,14 +4244,14 @@ static int ioc_general(void __user *arg, char *cmnd)
if (ha->raw_feat & SCATTER_GATHER) {
gen.command.u.raw.sdata = 0xffffffff;
gen.command.u.raw.sg_ranz = 1;
- gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr;
+ gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
gen.command.u.raw.sg_lst[1].sg_len = 0;
} else {
gen.command.u.raw.sdata = paddr;
gen.command.u.raw.sg_ranz = 0;
}
- gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len;
+ gen.command.u.raw.sense_data = (u32)paddr + gen.data_len;
}
} else {
gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
@@ -4283,7 +4283,7 @@ static int ioc_hdrlist(void __user *arg, char *cmnd)
gdth_ioctl_rescan *rsc;
gdth_cmd_str *cmd;
gdth_ha_str *ha;
- unchar i;
+ u8 i;
int rc = -ENOMEM;
u32 cluster_type = 0;
@@ -4335,11 +4335,11 @@ static int ioc_rescan(void __user *arg, char *cmnd)
{
gdth_ioctl_rescan *rsc;
gdth_cmd_str *cmd;
- ushort i, status, hdr_cnt;
- ulong32 info;
+ u16 i, status, hdr_cnt;
+ u32 info;
int cyls, hds, secs;
int rc = -ENOMEM;
- ulong flags;
+ unsigned long flags;
gdth_ha_str *ha;
rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
@@ -4367,7 +4367,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
i = 0;
- hdr_cnt = (status == S_OK ? (ushort)info : 0);
+ hdr_cnt = (status == S_OK ? (u16)info : 0);
} else {
i = rsc->hdr_no;
hdr_cnt = i + 1;
@@ -4418,7 +4418,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0);
+ ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
spin_unlock_irqrestore(&ha->smp_lock, flags);
cmd->Service = CACHESERVICE;
@@ -4432,7 +4432,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[i].cluster_type =
- ((status == S_OK && !shared_access) ? (ushort)info : 0);
+ ((status == S_OK && !shared_access) ? (u16)info : 0);
spin_unlock_irqrestore(&ha->smp_lock, flags);
rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
@@ -4446,7 +4446,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].rw_attribs = (status == S_OK ? (ushort)info : 0);
+ ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
@@ -4466,7 +4466,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
{
gdth_ha_str *ha;
Scsi_Cmnd *scp;
- ulong flags;
+ unsigned long flags;
char cmnd[MAX_COMMAND_SIZE];
void __user *argp = (void __user *)arg;
@@ -4495,9 +4495,9 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
{
gdth_ioctl_osvers osv;
- osv.version = (unchar)(LINUX_VERSION_CODE >> 16);
- osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8);
- osv.revision = (ushort)(LINUX_VERSION_CODE & 0xff);
+ osv.version = (u8)(LINUX_VERSION_CODE >> 16);
+ osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
+ osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
return -EFAULT;
break;
@@ -4512,10 +4512,10 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
return -EFAULT;
if (ha->type == GDT_ISA || ha->type == GDT_EISA) {
- ctrt.type = (unchar)((ha->stype>>20) - 0x10);
+ ctrt.type = (u8)((ha->stype>>20) - 0x10);
} else {
if (ha->type != GDT_PCIMPR) {
- ctrt.type = (unchar)((ha->stype<<4) + 6);
+ ctrt.type = (u8)((ha->stype<<4) + 6);
} else {
ctrt.type =
(ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
@@ -4546,7 +4546,7 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
case GDTIOCTL_LOCKCHN:
{
gdth_ioctl_lockchn lchn;
- unchar i, j;
+ u8 i, j;
if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
(NULL == (ha = gdth_find_ha(lchn.ionode))))
@@ -4670,7 +4670,7 @@ static struct scsi_host_template gdth_template = {
};
#ifdef CONFIG_ISA
-static int __init gdth_isa_probe_one(ulong32 isa_bios)
+static int __init gdth_isa_probe_one(u32 isa_bios)
{
struct Scsi_Host *shp;
gdth_ha_str *ha;
@@ -4802,7 +4802,7 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios)
#endif /* CONFIG_ISA */
#ifdef CONFIG_EISA
-static int __init gdth_eisa_probe_one(ushort eisa_slot)
+static int __init gdth_eisa_probe_one(u16 eisa_slot)
{
struct Scsi_Host *shp;
gdth_ha_str *ha;
@@ -5120,7 +5120,7 @@ static void gdth_remove_one(gdth_ha_str *ha)
scsi_host_put(shp);
}
-static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
+static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
gdth_ha_str *ha;
@@ -5158,14 +5158,14 @@ static int __init gdth_init(void)
if (probe_eisa_isa) {
/* scanning for controllers, at first: ISA controller */
#ifdef CONFIG_ISA
- ulong32 isa_bios;
+ u32 isa_bios;
for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL;
isa_bios += 0x8000UL)
gdth_isa_probe_one(isa_bios);
#endif
#ifdef CONFIG_EISA
{
- ushort eisa_slot;
+ u16 eisa_slot;
for (eisa_slot = 0x1000; eisa_slot <= 0x8000;
eisa_slot += 0x1000)
gdth_eisa_probe_one(eisa_slot);
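Every hunk in the gdth.c diff above, and in the gdth.h diff that follows, applies the same mechanical substitution: the driver-private scalar typedefs are replaced by the fixed-width kernel types (unchar -> u8, ushort -> u16, ulong32 -> u32, ulong64 -> u64, ulong -> unsigned long), and the driver's PACKED macro by an explicit __attribute__((packed)). Below is a minimal compile-checkable sketch of why the rename is layout-neutral, using <stdint.h> as a userspace stand-in for the kernel's <linux/types.h> and the screenservice message header from the gdth.h hunk below; the gdth_msg_hdr name, the stand-in typedefs, and the 16-byte assertion are illustrative only, not part of the patch:

/* Userspace sketch: each retired name and its replacement have the same
 * width, so the packed on-DPRAM structures keep their exact layout. */
#include <stdint.h>

typedef uint8_t  u8;   /* replaces unchar  (unsigned char)          */
typedef uint16_t u16;  /* replaces ushort  (unsigned short)         */
typedef uint32_t u32;  /* replaces ulong32 (driver-private 32-bit)  */
typedef uint64_t u64;  /* replaces ulong64 (driver-private 64-bit)  */

/* Fixed-size prefix of gdth_msg_str after conversion; the trailing
 * msg_text[MSGLEN+2] member is omitted here for self-containment. */
typedef struct {
    u32 msg_handle;       /* message handle  */
    u32 msg_len;          /* size of message */
    u32 msg_alen;         /* answer length   */
    u8  msg_answer;       /* answer flag     */
    u8  msg_ext;          /* more messages   */
    u8  msg_reserved[2];
} __attribute__((packed)) gdth_msg_hdr;

/* 3 * 4 + 4 * 1 = 16 bytes, identical to the old unchar/ulong32 layout. */
_Static_assert(sizeof(gdth_msg_hdr) == 16, "packed layout preserved");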
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 1646444e9bd5..120a0625a7b5 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -321,524 +321,524 @@
/* screenservice message */
typedef struct {
- ulong32 msg_handle; /* message handle */
- ulong32 msg_len; /* size of message */
- ulong32 msg_alen; /* answer length */
- unchar msg_answer; /* answer flag */
- unchar msg_ext; /* more messages */
- unchar msg_reserved[2];
+ u32 msg_handle; /* message handle */
+ u32 msg_len; /* size of message */
+ u32 msg_alen; /* answer length */
+ u8 msg_answer; /* answer flag */
+ u8 msg_ext; /* more messages */
+ u8 msg_reserved[2];
char msg_text[MSGLEN+2]; /* the message text */
-} PACKED gdth_msg_str;
+} __attribute__((packed)) gdth_msg_str;
/* IOCTL data structures */
/* Status coalescing buffer for returning multiple requests per interrupt */
typedef struct {
- ulong32 status;
- ulong32 ext_status;
- ulong32 info0;
- ulong32 info1;
-} PACKED gdth_coal_status;
+ u32 status;
+ u32 ext_status;
+ u32 info0;
+ u32 info1;
+} __attribute__((packed)) gdth_coal_status;
/* performance mode data structure */
typedef struct {
- ulong32 version; /* The version of this IOCTL structure. */
- ulong32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */
- ulong32 st_buff_addr1; /* physical address of status buffer 1 */
- ulong32 st_buff_u_addr1; /* reserved for 64 bit addressing */
- ulong32 st_buff_indx1; /* reserved command idx. for this buffer */
- ulong32 st_buff_addr2; /* physical address of status buffer 1 */
- ulong32 st_buff_u_addr2; /* reserved for 64 bit addressing */
- ulong32 st_buff_indx2; /* reserved command idx. for this buffer */
- ulong32 st_buff_size; /* size of each buffer in bytes */
- ulong32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */
- ulong32 cmd_buff_addr1; /* physical address of cmd buffer 1 */
- ulong32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */
- ulong32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */
- ulong32 cmd_buff_addr2; /* physical address of cmd buffer 1 */
- ulong32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
- ulong32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */
- ulong32 cmd_buff_size; /* size of each cmd bufer in bytes */
- ulong32 reserved1;
- ulong32 reserved2;
-} PACKED gdth_perf_modes;
+ u32 version; /* The version of this IOCTL structure. */
+ u32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */
+ u32 st_buff_addr1; /* physical address of status buffer 1 */
+ u32 st_buff_u_addr1; /* reserved for 64 bit addressing */
+ u32 st_buff_indx1; /* reserved command idx. for this buffer */
+ u32 st_buff_addr2; /* physical address of status buffer 2 */
+ u32 st_buff_u_addr2; /* reserved for 64 bit addressing */
+ u32 st_buff_indx2; /* reserved command idx. for this buffer */
+ u32 st_buff_size; /* size of each buffer in bytes */
+ u32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */
+ u32 cmd_buff_addr1; /* physical address of cmd buffer 1 */
+ u32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */
+ u32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */
+ u32 cmd_buff_addr2; /* physical address of cmd buffer 2 */
+ u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
+ u32 cmd_buff_indx2; /* cmd buf addr2 unique identifier */
+ u32 cmd_buff_size; /* size of each cmd buffer in bytes */
+ u32 reserved1;
+ u32 reserved2;
+} __attribute__((packed)) gdth_perf_modes;
/* SCSI drive info */
typedef struct {
- unchar vendor[8]; /* vendor string */
- unchar product[16]; /* product string */
- unchar revision[4]; /* revision */
- ulong32 sy_rate; /* current rate for sync. tr. */
- ulong32 sy_max_rate; /* max. rate for sync. tr. */
- ulong32 no_ldrive; /* belongs to this log. drv.*/
- ulong32 blkcnt; /* number of blocks */
- ushort blksize; /* size of block in bytes */
- unchar available; /* flag: access is available */
- unchar init; /* medium is initialized */
- unchar devtype; /* SCSI devicetype */
- unchar rm_medium; /* medium is removable */
- unchar wp_medium; /* medium is write protected */
- unchar ansi; /* SCSI I/II or III? */
- unchar protocol; /* same as ansi */
- unchar sync; /* flag: sync. transfer enab. */
- unchar disc; /* flag: disconnect enabled */
- unchar queueing; /* flag: command queing enab. */
- unchar cached; /* flag: caching enabled */
- unchar target_id; /* target ID of device */
- unchar lun; /* LUN id of device */
- unchar orphan; /* flag: drive fragment */
- ulong32 last_error; /* sense key or drive state */
- ulong32 last_result; /* result of last command */
- ulong32 check_errors; /* err. in last surface check */
- unchar percent; /* progress for surface check */
- unchar last_check; /* IOCTRL operation */
- unchar res[2];
- ulong32 flags; /* from 1.19/2.19: raw reserv.*/
- unchar multi_bus; /* multi bus dev? (fibre ch.) */
- unchar mb_status; /* status: available? */
- unchar res2[2];
- unchar mb_alt_status; /* status on second bus */
- unchar mb_alt_bid; /* number of second bus */
- unchar mb_alt_tid; /* target id on second bus */
- unchar res3;
- unchar fc_flag; /* from 1.22/2.22: info valid?*/
- unchar res4;
- ushort fc_frame_size; /* frame size (bytes) */
+ u8 vendor[8]; /* vendor string */
+ u8 product[16]; /* product string */
+ u8 revision[4]; /* revision */
+ u32 sy_rate; /* current rate for sync. tr. */
+ u32 sy_max_rate; /* max. rate for sync. tr. */
+ u32 no_ldrive; /* belongs to this log. drv.*/
+ u32 blkcnt; /* number of blocks */
+ u16 blksize; /* size of block in bytes */
+ u8 available; /* flag: access is available */
+ u8 init; /* medium is initialized */
+ u8 devtype; /* SCSI devicetype */
+ u8 rm_medium; /* medium is removable */
+ u8 wp_medium; /* medium is write protected */
+ u8 ansi; /* SCSI I/II or III? */
+ u8 protocol; /* same as ansi */
+ u8 sync; /* flag: sync. transfer enab. */
+ u8 disc; /* flag: disconnect enabled */
+ u8 queueing; /* flag: command queuing enab. */
+ u8 cached; /* flag: caching enabled */
+ u8 target_id; /* target ID of device */
+ u8 lun; /* LUN id of device */
+ u8 orphan; /* flag: drive fragment */
+ u32 last_error; /* sense key or drive state */
+ u32 last_result; /* result of last command */
+ u32 check_errors; /* err. in last surface check */
+ u8 percent; /* progress for surface check */
+ u8 last_check; /* IOCTRL operation */
+ u8 res[2];
+ u32 flags; /* from 1.19/2.19: raw reserv.*/
+ u8 multi_bus; /* multi bus dev? (fibre ch.) */
+ u8 mb_status; /* status: available? */
+ u8 res2[2];
+ u8 mb_alt_status; /* status on second bus */
+ u8 mb_alt_bid; /* number of second bus */
+ u8 mb_alt_tid; /* target id on second bus */
+ u8 res3;
+ u8 fc_flag; /* from 1.22/2.22: info valid?*/
+ u8 res4;
+ u16 fc_frame_size; /* frame size (bytes) */
char wwn[8]; /* world wide name */
-} PACKED gdth_diskinfo_str;
+} __attribute__((packed)) gdth_diskinfo_str;
/* get SCSI channel count */
typedef struct {
- ulong32 channel_no; /* number of channel */
- ulong32 drive_cnt; /* drive count */
- unchar siop_id; /* SCSI processor ID */
- unchar siop_state; /* SCSI processor state */
-} PACKED gdth_getch_str;
+ u32 channel_no; /* number of channel */
+ u32 drive_cnt; /* drive count */
+ u8 siop_id; /* SCSI processor ID */
+ u8 siop_state; /* SCSI processor state */
+} __attribute__((packed)) gdth_getch_str;
/* get SCSI drive numbers */
typedef struct {
- ulong32 sc_no; /* SCSI channel */
- ulong32 sc_cnt; /* sc_list[] elements */
- ulong32 sc_list[MAXID]; /* minor device numbers */
-} PACKED gdth_drlist_str;
+ u32 sc_no; /* SCSI channel */
+ u32 sc_cnt; /* sc_list[] elements */
+ u32 sc_list[MAXID]; /* minor device numbers */
+} __attribute__((packed)) gdth_drlist_str;
/* get grown/primary defect count */
typedef struct {
- unchar sddc_type; /* 0x08: grown, 0x10: prim. */
- unchar sddc_format; /* list entry format */
- unchar sddc_len; /* list entry length */
- unchar sddc_res;
- ulong32 sddc_cnt; /* entry count */
-} PACKED gdth_defcnt_str;
+ u8 sddc_type; /* 0x08: grown, 0x10: prim. */
+ u8 sddc_format; /* list entry format */
+ u8 sddc_len; /* list entry length */
+ u8 sddc_res;
+ u32 sddc_cnt; /* entry count */
+} __attribute__((packed)) gdth_defcnt_str;
/* disk statistics */
typedef struct {
- ulong32 bid; /* SCSI channel */
- ulong32 first; /* first SCSI disk */
- ulong32 entries; /* number of elements */
- ulong32 count; /* (R) number of init. el. */
- ulong32 mon_time; /* time stamp */
+ u32 bid; /* SCSI channel */
+ u32 first; /* first SCSI disk */
+ u32 entries; /* number of elements */
+ u32 count; /* (R) number of init. el. */
+ u32 mon_time; /* time stamp */
struct {
- unchar tid; /* target ID */
- unchar lun; /* LUN */
- unchar res[2];
- ulong32 blk_size; /* block size in bytes */
- ulong32 rd_count; /* bytes read */
- ulong32 wr_count; /* bytes written */
- ulong32 rd_blk_count; /* blocks read */
- ulong32 wr_blk_count; /* blocks written */
- ulong32 retries; /* retries */
- ulong32 reassigns; /* reassigns */
- } PACKED list[1];
-} PACKED gdth_dskstat_str;
+ u8 tid; /* target ID */
+ u8 lun; /* LUN */
+ u8 res[2];
+ u32 blk_size; /* block size in bytes */
+ u32 rd_count; /* bytes read */
+ u32 wr_count; /* bytes written */
+ u32 rd_blk_count; /* blocks read */
+ u32 wr_blk_count; /* blocks written */
+ u32 retries; /* retries */
+ u32 reassigns; /* reassigns */
+ } __attribute__((packed)) list[1];
+} __attribute__((packed)) gdth_dskstat_str;
/* IO channel header */
typedef struct {
- ulong32 version; /* version (-1UL: newest) */
- unchar list_entries; /* list entry count */
- unchar first_chan; /* first channel number */
- unchar last_chan; /* last channel number */
- unchar chan_count; /* (R) channel count */
- ulong32 list_offset; /* offset of list[0] */
-} PACKED gdth_iochan_header;
+ u32 version; /* version (-1UL: newest) */
+ u8 list_entries; /* list entry count */
+ u8 first_chan; /* first channel number */
+ u8 last_chan; /* last channel number */
+ u8 chan_count; /* (R) channel count */
+ u32 list_offset; /* offset of list[0] */
+} __attribute__((packed)) gdth_iochan_header;
/* get IO channel description */
typedef struct {
gdth_iochan_header hdr;
struct {
- ulong32 address; /* channel address */
- unchar type; /* type (SCSI, FCAL) */
- unchar local_no; /* local number */
- ushort features; /* channel features */
- } PACKED list[MAXBUS];
-} PACKED gdth_iochan_str;
+ u32 address; /* channel address */
+ u8 type; /* type (SCSI, FCAL) */
+ u8 local_no; /* local number */
+ u16 features; /* channel features */
+ } __attribute__((packed)) list[MAXBUS];
+} __attribute__((packed)) gdth_iochan_str;
/* get raw IO channel description */
typedef struct {
gdth_iochan_header hdr;
struct {
- unchar proc_id; /* processor id */
- unchar proc_defect; /* defect ? */
- unchar reserved[2];
- } PACKED list[MAXBUS];
-} PACKED gdth_raw_iochan_str;
+ u8 proc_id; /* processor id */
+ u8 proc_defect; /* defect ? */
+ u8 reserved[2];
+ } __attribute__((packed)) list[MAXBUS];
+} __attribute__((packed)) gdth_raw_iochan_str;
/* array drive component */
typedef struct {
- ulong32 al_controller; /* controller ID */
- unchar al_cache_drive; /* cache drive number */
- unchar al_status; /* cache drive state */
- unchar al_res[2];
-} PACKED gdth_arraycomp_str;
+ u32 al_controller; /* controller ID */
+ u8 al_cache_drive; /* cache drive number */
+ u8 al_status; /* cache drive state */
+ u8 al_res[2];
+} __attribute__((packed)) gdth_arraycomp_str;
/* array drive information */
typedef struct {
- unchar ai_type; /* array type (RAID0,4,5) */
- unchar ai_cache_drive_cnt; /* active cachedrives */
- unchar ai_state; /* array drive state */
- unchar ai_master_cd; /* master cachedrive */
- ulong32 ai_master_controller; /* ID of master controller */
- ulong32 ai_size; /* user capacity [sectors] */
- ulong32 ai_striping_size; /* striping size [sectors] */
- ulong32 ai_secsize; /* sector size [bytes] */
- ulong32 ai_err_info; /* failed cache drive */
- unchar ai_name[8]; /* name of the array drive */
- unchar ai_controller_cnt; /* number of controllers */
- unchar ai_removable; /* flag: removable */
- unchar ai_write_protected; /* flag: write protected */
- unchar ai_devtype; /* type: always direct access */
+ u8 ai_type; /* array type (RAID0,4,5) */
+ u8 ai_cache_drive_cnt; /* active cachedrives */
+ u8 ai_state; /* array drive state */
+ u8 ai_master_cd; /* master cachedrive */
+ u32 ai_master_controller; /* ID of master controller */
+ u32 ai_size; /* user capacity [sectors] */
+ u32 ai_striping_size; /* striping size [sectors] */
+ u32 ai_secsize; /* sector size [bytes] */
+ u32 ai_err_info; /* failed cache drive */
+ u8 ai_name[8]; /* name of the array drive */
+ u8 ai_controller_cnt; /* number of controllers */
+ u8 ai_removable; /* flag: removable */
+ u8 ai_write_protected; /* flag: write protected */
+ u8 ai_devtype; /* type: always direct access */
gdth_arraycomp_str ai_drives[35]; /* drive components: */
- unchar ai_drive_entries; /* number of drive components */
- unchar ai_protected; /* protection flag */
- unchar ai_verify_state; /* state of a parity verify */
- unchar ai_ext_state; /* extended array drive state */
- unchar ai_expand_state; /* array expand state (>=2.18)*/
- unchar ai_reserved[3];
-} PACKED gdth_arrayinf_str;
+ u8 ai_drive_entries; /* number of drive components */
+ u8 ai_protected; /* protection flag */
+ u8 ai_verify_state; /* state of a parity verify */
+ u8 ai_ext_state; /* extended array drive state */
+ u8 ai_expand_state; /* array expand state (>=2.18)*/
+ u8 ai_reserved[3];
+} __attribute__((packed)) gdth_arrayinf_str;
/* get array drive list */
typedef struct {
- ulong32 controller_no; /* controller no. */
- unchar cd_handle; /* master cachedrive */
- unchar is_arrayd; /* Flag: is array drive? */
- unchar is_master; /* Flag: is array master? */
- unchar is_parity; /* Flag: is parity drive? */
- unchar is_hotfix; /* Flag: is hotfix drive? */
- unchar res[3];
-} PACKED gdth_alist_str;
+ u32 controller_no; /* controller no. */
+ u8 cd_handle; /* master cachedrive */
+ u8 is_arrayd; /* Flag: is array drive? */
+ u8 is_master; /* Flag: is array master? */
+ u8 is_parity; /* Flag: is parity drive? */
+ u8 is_hotfix; /* Flag: is hotfix drive? */
+ u8 res[3];
+} __attribute__((packed)) gdth_alist_str;
typedef struct {
- ulong32 entries_avail; /* allocated entries */
- ulong32 entries_init; /* returned entries */
- ulong32 first_entry; /* first entry number */
- ulong32 list_offset; /* offset of following list */
+ u32 entries_avail; /* allocated entries */
+ u32 entries_init; /* returned entries */
+ u32 first_entry; /* first entry number */
+ u32 list_offset; /* offset of following list */
gdth_alist_str list[1]; /* list */
-} PACKED gdth_arcdl_str;
+} __attribute__((packed)) gdth_arcdl_str;
/* cache info/config IOCTL */
typedef struct {
- ulong32 version; /* firmware version */
- ushort state; /* cache state (on/off) */
- ushort strategy; /* cache strategy */
- ushort write_back; /* write back state (on/off) */
- ushort block_size; /* cache block size */
-} PACKED gdth_cpar_str;
+ u32 version; /* firmware version */
+ u16 state; /* cache state (on/off) */
+ u16 strategy; /* cache strategy */
+ u16 write_back; /* write back state (on/off) */
+ u16 block_size; /* cache block size */
+} __attribute__((packed)) gdth_cpar_str;
typedef struct {
- ulong32 csize; /* cache size */
- ulong32 read_cnt; /* read/write counter */
- ulong32 write_cnt;
- ulong32 tr_hits; /* hits */
- ulong32 sec_hits;
- ulong32 sec_miss; /* misses */
-} PACKED gdth_cstat_str;
+ u32 csize; /* cache size */
+ u32 read_cnt; /* read/write counter */
+ u32 write_cnt;
+ u32 tr_hits; /* hits */
+ u32 sec_hits;
+ u32 sec_miss; /* misses */
+} __attribute__((packed)) gdth_cstat_str;
typedef struct {
gdth_cpar_str cpar;
gdth_cstat_str cstat;
-} PACKED gdth_cinfo_str;
+} __attribute__((packed)) gdth_cinfo_str;
/* cache drive info */
typedef struct {
- unchar cd_name[8]; /* cache drive name */
- ulong32 cd_devtype; /* SCSI devicetype */
- ulong32 cd_ldcnt; /* number of log. drives */
- ulong32 cd_last_error; /* last error */
- unchar cd_initialized; /* drive is initialized */
- unchar cd_removable; /* media is removable */
- unchar cd_write_protected; /* write protected */
- unchar cd_flags; /* Pool Hot Fix? */
- ulong32 ld_blkcnt; /* number of blocks */
- ulong32 ld_blksize; /* blocksize */
- ulong32 ld_dcnt; /* number of disks */
- ulong32 ld_slave; /* log. drive index */
- ulong32 ld_dtype; /* type of logical drive */
- ulong32 ld_last_error; /* last error */
- unchar ld_name[8]; /* log. drive name */
- unchar ld_error; /* error */
-} PACKED gdth_cdrinfo_str;
+ u8 cd_name[8]; /* cache drive name */
+ u32 cd_devtype; /* SCSI devicetype */
+ u32 cd_ldcnt; /* number of log. drives */
+ u32 cd_last_error; /* last error */
+ u8 cd_initialized; /* drive is initialized */
+ u8 cd_removable; /* media is removable */
+ u8 cd_write_protected; /* write protected */
+ u8 cd_flags; /* Pool Hot Fix? */
+ u32 ld_blkcnt; /* number of blocks */
+ u32 ld_blksize; /* blocksize */
+ u32 ld_dcnt; /* number of disks */
+ u32 ld_slave; /* log. drive index */
+ u32 ld_dtype; /* type of logical drive */
+ u32 ld_last_error; /* last error */
+ u8 ld_name[8]; /* log. drive name */
+ u8 ld_error; /* error */
+} __attribute__((packed)) gdth_cdrinfo_str;
/* OEM string */
typedef struct {
- ulong32 ctl_version;
- ulong32 file_major_version;
- ulong32 file_minor_version;
- ulong32 buffer_size;
- ulong32 cpy_count;
- ulong32 ext_error;
- ulong32 oem_id;
- ulong32 board_id;
-} PACKED gdth_oem_str_params;
-
-typedef struct {
- unchar product_0_1_name[16];
- unchar product_4_5_name[16];
- unchar product_cluster_name[16];
- unchar product_reserved[16];
- unchar scsi_cluster_target_vendor_id[16];
- unchar cluster_raid_fw_name[16];
- unchar oem_brand_name[16];
- unchar oem_raid_type[16];
- unchar bios_type[13];
- unchar bios_title[50];
- unchar oem_company_name[37];
- ulong32 pci_id_1;
- ulong32 pci_id_2;
- unchar validation_status[80];
- unchar reserved_1[4];
- unchar scsi_host_drive_inquiry_vendor_id[16];
- unchar library_file_template[16];
- unchar reserved_2[16];
- unchar tool_name_1[32];
- unchar tool_name_2[32];
- unchar tool_name_3[32];
- unchar oem_contact_1[84];
- unchar oem_contact_2[84];
- unchar oem_contact_3[84];
-} PACKED gdth_oem_str;
+ u32 ctl_version;
+ u32 file_major_version;
+ u32 file_minor_version;
+ u32 buffer_size;
+ u32 cpy_count;
+ u32 ext_error;
+ u32 oem_id;
+ u32 board_id;
+} __attribute__((packed)) gdth_oem_str_params;
+
+typedef struct {
+ u8 product_0_1_name[16];
+ u8 product_4_5_name[16];
+ u8 product_cluster_name[16];
+ u8 product_reserved[16];
+ u8 scsi_cluster_target_vendor_id[16];
+ u8 cluster_raid_fw_name[16];
+ u8 oem_brand_name[16];
+ u8 oem_raid_type[16];
+ u8 bios_type[13];
+ u8 bios_title[50];
+ u8 oem_company_name[37];
+ u32 pci_id_1;
+ u32 pci_id_2;
+ u8 validation_status[80];
+ u8 reserved_1[4];
+ u8 scsi_host_drive_inquiry_vendor_id[16];
+ u8 library_file_template[16];
+ u8 reserved_2[16];
+ u8 tool_name_1[32];
+ u8 tool_name_2[32];
+ u8 tool_name_3[32];
+ u8 oem_contact_1[84];
+ u8 oem_contact_2[84];
+ u8 oem_contact_3[84];
+} __attribute__((packed)) gdth_oem_str;
typedef struct {
gdth_oem_str_params params;
gdth_oem_str text;
-} PACKED gdth_oem_str_ioctl;
+} __attribute__((packed)) gdth_oem_str_ioctl;
/* board features */
typedef struct {
- unchar chaining; /* Chaining supported */
- unchar striping; /* Striping (RAID-0) supp. */
- unchar mirroring; /* Mirroring (RAID-1) supp. */
- unchar raid; /* RAID-4/5/10 supported */
-} PACKED gdth_bfeat_str;
+ u8 chaining; /* Chaining supported */
+ u8 striping; /* Striping (RAID-0) supp. */
+ u8 mirroring; /* Mirroring (RAID-1) supp. */
+ u8 raid; /* RAID-4/5/10 supported */
+} __attribute__((packed)) gdth_bfeat_str;
/* board info IOCTL */
typedef struct {
- ulong32 ser_no; /* serial no. */
- unchar oem_id[2]; /* OEM ID */
- ushort ep_flags; /* eprom flags */
- ulong32 proc_id; /* processor ID */
- ulong32 memsize; /* memory size (bytes) */
- unchar mem_banks; /* memory banks */
- unchar chan_type; /* channel type */
- unchar chan_count; /* channel count */
- unchar rdongle_pres; /* dongle present? */
- ulong32 epr_fw_ver; /* (eprom) firmware version */
- ulong32 upd_fw_ver; /* (update) firmware version */
- ulong32 upd_revision; /* update revision */
+ u32 ser_no; /* serial no. */
+ u8 oem_id[2]; /* OEM ID */
+ u16 ep_flags; /* eprom flags */
+ u32 proc_id; /* processor ID */
+ u32 memsize; /* memory size (bytes) */
+ u8 mem_banks; /* memory banks */
+ u8 chan_type; /* channel type */
+ u8 chan_count; /* channel count */
+ u8 rdongle_pres; /* dongle present? */
+ u32 epr_fw_ver; /* (eprom) firmware version */
+ u32 upd_fw_ver; /* (update) firmware version */
+ u32 upd_revision; /* update revision */
char type_string[16]; /* controller name */
char raid_string[16]; /* RAID firmware name */
- unchar update_pres; /* update present? */
- unchar xor_pres; /* XOR engine present? */
- unchar prom_type; /* ROM type (eprom/flash) */
- unchar prom_count; /* number of ROM devices */
- ulong32 dup_pres; /* duplexing module present? */
- ulong32 chan_pres; /* number of expansion chn. */
- ulong32 mem_pres; /* memory expansion inst. ? */
- unchar ft_bus_system; /* fault bus supported? */
- unchar subtype_valid; /* board_subtype valid? */
- unchar board_subtype; /* subtype/hardware level */
- unchar ramparity_pres; /* RAM parity check hardware? */
-} PACKED gdth_binfo_str;
+ u8 update_pres; /* update present? */
+ u8 xor_pres; /* XOR engine present? */
+ u8 prom_type; /* ROM type (eprom/flash) */
+ u8 prom_count; /* number of ROM devices */
+ u32 dup_pres; /* duplexing module present? */
+ u32 chan_pres; /* number of expansion chn. */
+ u32 mem_pres; /* memory expansion inst. ? */
+ u8 ft_bus_system; /* fault bus supported? */
+ u8 subtype_valid; /* board_subtype valid? */
+ u8 board_subtype; /* subtype/hardware level */
+ u8 ramparity_pres; /* RAM parity check hardware? */
+} __attribute__((packed)) gdth_binfo_str;
/* get host drive info */
typedef struct {
char name[8]; /* host drive name */
- ulong32 size; /* size (sectors) */
- unchar host_drive; /* host drive number */
- unchar log_drive; /* log. drive (master) */
- unchar reserved;
- unchar rw_attribs; /* r/w attribs */
- ulong32 start_sec; /* start sector */
-} PACKED gdth_hentry_str;
-
-typedef struct {
- ulong32 entries; /* entry count */
- ulong32 offset; /* offset of entries */
- unchar secs_p_head; /* sectors/head */
- unchar heads_p_cyl; /* heads/cylinder */
- unchar reserved;
- unchar clust_drvtype; /* cluster drive type */
- ulong32 location; /* controller number */
+ u32 size; /* size (sectors) */
+ u8 host_drive; /* host drive number */
+ u8 log_drive; /* log. drive (master) */
+ u8 reserved;
+ u8 rw_attribs; /* r/w attribs */
+ u32 start_sec; /* start sector */
+} __attribute__((packed)) gdth_hentry_str;
+
+typedef struct {
+ u32 entries; /* entry count */
+ u32 offset; /* offset of entries */
+ u8 secs_p_head; /* sectors/head */
+ u8 heads_p_cyl; /* heads/cylinder */
+ u8 reserved;
+ u8 clust_drvtype; /* cluster drive type */
+ u32 location; /* controller number */
gdth_hentry_str entry[MAX_HDRIVES]; /* entries */
-} PACKED gdth_hget_str;
+} __attribute__((packed)) gdth_hget_str;
/* DPRAM structures */
/* interface area ISA/PCI */
typedef struct {
- unchar S_Cmd_Indx; /* special command */
- unchar volatile S_Status; /* status special command */
- ushort reserved1;
- ulong32 S_Info[4]; /* add. info special command */
- unchar volatile Sema0; /* command semaphore */
- unchar reserved2[3];
- unchar Cmd_Index; /* command number */
- unchar reserved3[3];
- ushort volatile Status; /* command status */
- ushort Service; /* service(for async.events) */
- ulong32 Info[2]; /* additional info */
+ u8 S_Cmd_Indx; /* special command */
+ u8 volatile S_Status; /* status special command */
+ u16 reserved1;
+ u32 S_Info[4]; /* add. info special command */
+ u8 volatile Sema0; /* command semaphore */
+ u8 reserved2[3];
+ u8 Cmd_Index; /* command number */
+ u8 reserved3[3];
+ u16 volatile Status; /* command status */
+ u16 Service; /* service(for async.events) */
+ u32 Info[2]; /* additional info */
struct {
- ushort offset; /* command offs. in the DPRAM*/
- ushort serv_id; /* service */
- } PACKED comm_queue[MAXOFFSETS]; /* command queue */
- ulong32 bios_reserved[2];
- unchar gdt_dpr_cmd[1]; /* commands */
-} PACKED gdt_dpr_if;
+ u16 offset; /* command offs. in the DPRAM*/
+ u16 serv_id; /* service */
+ } __attribute__((packed)) comm_queue[MAXOFFSETS]; /* command queue */
+ u32 bios_reserved[2];
+ u8 gdt_dpr_cmd[1]; /* commands */
+} __attribute__((packed)) gdt_dpr_if;
/* SRAM structure PCI controllers */
typedef struct {
- ulong32 magic; /* controller ID from BIOS */
- ushort need_deinit; /* switch betw. BIOS/driver */
- unchar switch_support; /* see need_deinit */
- unchar padding[9];
- unchar os_used[16]; /* OS code per service */
- unchar unused[28];
- unchar fw_magic; /* contr. ID from firmware */
-} PACKED gdt_pci_sram;
+ u32 magic; /* controller ID from BIOS */
+ u16 need_deinit; /* switch betw. BIOS/driver */
+ u8 switch_support; /* see need_deinit */
+ u8 padding[9];
+ u8 os_used[16]; /* OS code per service */
+ u8 unused[28];
+ u8 fw_magic; /* contr. ID from firmware */
+} __attribute__((packed)) gdt_pci_sram;
/* SRAM structure EISA controllers (but NOT GDT3000/3020) */
typedef struct {
- unchar os_used[16]; /* OS code per service */
- ushort need_deinit; /* switch betw. BIOS/driver */
- unchar switch_support; /* see need_deinit */
- unchar padding;
-} PACKED gdt_eisa_sram;
+ u8 os_used[16]; /* OS code per service */
+ u16 need_deinit; /* switch betw. BIOS/driver */
+ u8 switch_support; /* see need_deinit */
+ u8 padding;
+} __attribute__((packed)) gdt_eisa_sram;
/* DPRAM ISA controllers */
typedef struct {
union {
struct {
- unchar bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
- ulong32 magic; /* controller (EISA) ID */
- ushort need_deinit; /* switch betw. BIOS/driver */
- unchar switch_support; /* see need_deinit */
- unchar padding[9];
- unchar os_used[16]; /* OS code per service */
- } PACKED dp_sram;
- unchar bios_area[0x4000]; /* 16KB reserved for BIOS */
+ u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
+ u32 magic; /* controller (EISA) ID */
+ u16 need_deinit; /* switch betw. BIOS/driver */
+ u8 switch_support; /* see need_deinit */
+ u8 padding[9];
+ u8 os_used[16]; /* OS code per service */
+ } __attribute__((packed)) dp_sram;
+ u8 bios_area[0x4000]; /* 16KB reserved for BIOS */
} bu;
union {
gdt_dpr_if ic; /* interface area */
- unchar if_area[0x3000]; /* 12KB for interface */
+ u8 if_area[0x3000]; /* 12KB for interface */
} u;
struct {
- unchar memlock; /* write protection DPRAM */
- unchar event; /* release event */
- unchar irqen; /* board interrupts enable */
- unchar irqdel; /* acknowledge board int. */
- unchar volatile Sema1; /* status semaphore */
- unchar rq; /* IRQ/DRQ configuration */
- } PACKED io;
-} PACKED gdt2_dpram_str;
+ u8 memlock; /* write protection DPRAM */
+ u8 event; /* release event */
+ u8 irqen; /* board interrupts enable */
+ u8 irqdel; /* acknowledge board int. */
+ u8 volatile Sema1; /* status semaphore */
+ u8 rq; /* IRQ/DRQ configuration */
+ } __attribute__((packed)) io;
+} __attribute__((packed)) gdt2_dpram_str;
/* DPRAM PCI controllers */
typedef struct {
union {
gdt_dpr_if ic; /* interface area */
- unchar if_area[0xff0-sizeof(gdt_pci_sram)];
+ u8 if_area[0xff0-sizeof(gdt_pci_sram)];
} u;
gdt_pci_sram gdt6sr; /* SRAM structure */
struct {
- unchar unused0[1];
- unchar volatile Sema1; /* command semaphore */
- unchar unused1[3];
- unchar irqen; /* board interrupts enable */
- unchar unused2[2];
- unchar event; /* release event */
- unchar unused3[3];
- unchar irqdel; /* acknowledge board int. */
- unchar unused4[3];
- } PACKED io;
-} PACKED gdt6_dpram_str;
+ u8 unused0[1];
+ u8 volatile Sema1; /* command semaphore */
+ u8 unused1[3];
+ u8 irqen; /* board interrupts enable */
+ u8 unused2[2];
+ u8 event; /* release event */
+ u8 unused3[3];
+ u8 irqdel; /* acknowledge board int. */
+ u8 unused4[3];
+ } __attribute__((packed)) io;
+} __attribute__((packed)) gdt6_dpram_str;
/* PLX register structure (new PCI controllers) */
typedef struct {
- unchar cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
- unchar unused1[0x3f];
- unchar volatile sema0_reg; /* command semaphore */
- unchar volatile sema1_reg; /* status semaphore */
- unchar unused2[2];
- ushort volatile status; /* command status */
- ushort service; /* service */
- ulong32 info[2]; /* additional info */
- unchar unused3[0x10];
- unchar ldoor_reg; /* PCI to local doorbell */
- unchar unused4[3];
- unchar volatile edoor_reg; /* local to PCI doorbell */
- unchar unused5[3];
- unchar control0; /* control0 register(unused) */
- unchar control1; /* board interrupts enable */
- unchar unused6[0x16];
-} PACKED gdt6c_plx_regs;
+ u8 cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
+ u8 unused1[0x3f];
+ u8 volatile sema0_reg; /* command semaphore */
+ u8 volatile sema1_reg; /* status semaphore */
+ u8 unused2[2];
+ u16 volatile status; /* command status */
+ u16 service; /* service */
+ u32 info[2]; /* additional info */
+ u8 unused3[0x10];
+ u8 ldoor_reg; /* PCI to local doorbell */
+ u8 unused4[3];
+ u8 volatile edoor_reg; /* local to PCI doorbell */
+ u8 unused5[3];
+ u8 control0; /* control0 register(unused) */
+ u8 control1; /* board interrupts enable */
+ u8 unused6[0x16];
+} __attribute__((packed)) gdt6c_plx_regs;
/* DPRAM new PCI controllers */
typedef struct {
union {
gdt_dpr_if ic; /* interface area */
- unchar if_area[0x4000-sizeof(gdt_pci_sram)];
+ u8 if_area[0x4000-sizeof(gdt_pci_sram)];
} u;
gdt_pci_sram gdt6sr; /* SRAM structure */
-} PACKED gdt6c_dpram_str;
+} __attribute__((packed)) gdt6c_dpram_str;
/* i960 register structure (PCI MPR controllers) */
typedef struct {
- unchar unused1[16];
- unchar volatile sema0_reg; /* command semaphore */
- unchar unused2;
- unchar volatile sema1_reg; /* status semaphore */
- unchar unused3;
- ushort volatile status; /* command status */
- ushort service; /* service */
- ulong32 info[2]; /* additional info */
- unchar ldoor_reg; /* PCI to local doorbell */
- unchar unused4[11];
- unchar volatile edoor_reg; /* local to PCI doorbell */
- unchar unused5[7];
- unchar edoor_en_reg; /* board interrupts enable */
- unchar unused6[27];
- ulong32 unused7[939];
- ulong32 severity;
+ u8 unused1[16];
+ u8 volatile sema0_reg; /* command semaphore */
+ u8 unused2;
+ u8 volatile sema1_reg; /* status semaphore */
+ u8 unused3;
+ u16 volatile status; /* command status */
+ u16 service; /* service */
+ u32 info[2]; /* additional info */
+ u8 ldoor_reg; /* PCI to local doorbell */
+ u8 unused4[11];
+ u8 volatile edoor_reg; /* local to PCI doorbell */
+ u8 unused5[7];
+ u8 edoor_en_reg; /* board interrupts enable */
+ u8 unused6[27];
+ u32 unused7[939];
+ u32 severity;
char evt_str[256]; /* event string */
-} PACKED gdt6m_i960_regs;
+} __attribute__((packed)) gdt6m_i960_regs;
/* DPRAM PCI MPR controllers */
typedef struct {
gdt6m_i960_regs i960r; /* 4KB i960 registers */
union {
gdt_dpr_if ic; /* interface area */
- unchar if_area[0x3000-sizeof(gdt_pci_sram)];
+ u8 if_area[0x3000-sizeof(gdt_pci_sram)];
} u;
gdt_pci_sram gdt6sr; /* SRAM structure */
-} PACKED gdt6m_dpram_str;
+} __attribute__((packed)) gdt6m_dpram_str;
/* PCI resources */
typedef struct {
struct pci_dev *pdev;
- ulong dpmem; /* DPRAM address */
- ulong io; /* IO address */
+ unsigned long dpmem; /* DPRAM address */
+ unsigned long io; /* IO address */
} gdth_pci_str;
@@ -846,93 +846,93 @@ typedef struct {
typedef struct {
struct Scsi_Host *shost;
struct list_head list;
- ushort hanum;
- ushort oem_id; /* OEM */
- ushort type; /* controller class */
- ulong32 stype; /* subtype (PCI: device ID) */
- ushort fw_vers; /* firmware version */
- ushort cache_feat; /* feat. cache serv. (s/g,..)*/
- ushort raw_feat; /* feat. raw service (s/g,..)*/
- ushort screen_feat; /* feat. raw service (s/g,..)*/
- ushort bmic; /* BMIC address (EISA) */
+ u16 hanum;
+ u16 oem_id; /* OEM */
+ u16 type; /* controller class */
+ u32 stype; /* subtype (PCI: device ID) */
+ u16 fw_vers; /* firmware version */
+ u16 cache_feat; /* feat. cache serv. (s/g,..)*/
+ u16 raw_feat; /* feat. raw service (s/g,..)*/
+ u16 screen_feat; /* feat. screen serv. (s/g,..)*/
+ u16 bmic; /* BMIC address (EISA) */
void __iomem *brd; /* DPRAM address */
- ulong32 brd_phys; /* slot number/BIOS address */
+ u32 brd_phys; /* slot number/BIOS address */
gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
gdth_cmd_str cmdext;
gdth_cmd_str *pccb; /* address command structure */
- ulong32 ccb_phys; /* phys. address */
+ u32 ccb_phys; /* phys. address */
#ifdef INT_COAL
gdth_coal_status *coal_stat; /* buffer for coalescing int.*/
- ulong64 coal_stat_phys; /* phys. address */
+ u64 coal_stat_phys; /* phys. address */
#endif
char *pscratch; /* scratch (DMA) buffer */
- ulong64 scratch_phys; /* phys. address */
- unchar scratch_busy; /* in use? */
- unchar dma64_support; /* 64-bit DMA supported? */
+ u64 scratch_phys; /* phys. address */
+ u8 scratch_busy; /* in use? */
+ u8 dma64_support; /* 64-bit DMA supported? */
gdth_msg_str *pmsg; /* message buffer */
- ulong64 msg_phys; /* phys. address */
- unchar scan_mode; /* current scan mode */
- unchar irq; /* IRQ */
- unchar drq; /* DRQ (ISA controllers) */
- ushort status; /* command status */
- ushort service; /* service/firmware ver./.. */
- ulong32 info;
- ulong32 info2; /* additional info */
+ u64 msg_phys; /* phys. address */
+ u8 scan_mode; /* current scan mode */
+ u8 irq; /* IRQ */
+ u8 drq; /* DRQ (ISA controllers) */
+ u16 status; /* command status */
+ u16 service; /* service/firmware ver./.. */
+ u32 info;
+ u32 info2; /* additional info */
Scsi_Cmnd *req_first; /* top of request queue */
struct {
- unchar present; /* Flag: host drive present? */
- unchar is_logdrv; /* Flag: log. drive (master)? */
- unchar is_arraydrv; /* Flag: array drive? */
- unchar is_master; /* Flag: array drive master? */
- unchar is_parity; /* Flag: parity drive? */
- unchar is_hotfix; /* Flag: hotfix drive? */
- unchar master_no; /* number of master drive */
- unchar lock; /* drive locked? (hot plug) */
- unchar heads; /* mapping */
- unchar secs;
- ushort devtype; /* further information */
- ulong64 size; /* capacity */
- unchar ldr_no; /* log. drive no. */
- unchar rw_attribs; /* r/w attributes */
- unchar cluster_type; /* cluster properties */
- unchar media_changed; /* Flag:MOUNT/UNMOUNT occured */
- ulong32 start_sec; /* start sector */
+ u8 present; /* Flag: host drive present? */
+ u8 is_logdrv; /* Flag: log. drive (master)? */
+ u8 is_arraydrv; /* Flag: array drive? */
+ u8 is_master; /* Flag: array drive master? */
+ u8 is_parity; /* Flag: parity drive? */
+ u8 is_hotfix; /* Flag: hotfix drive? */
+ u8 master_no; /* number of master drive */
+ u8 lock; /* drive locked? (hot plug) */
+ u8 heads; /* mapping */
+ u8 secs;
+ u16 devtype; /* further information */
+ u64 size; /* capacity */
+ u8 ldr_no; /* log. drive no. */
+ u8 rw_attribs; /* r/w attributes */
+ u8 cluster_type; /* cluster properties */
+ u8 media_changed; /* Flag: MOUNT/UNMOUNT occurred */
+ u32 start_sec; /* start sector */
} hdr[MAX_LDRIVES]; /* host drives */
struct {
- unchar lock; /* channel locked? (hot plug) */
- unchar pdev_cnt; /* physical device count */
- unchar local_no; /* local channel number */
- unchar io_cnt[MAXID]; /* current IO count */
- ulong32 address; /* channel address */
- ulong32 id_list[MAXID]; /* IDs of the phys. devices */
+ u8 lock; /* channel locked? (hot plug) */
+ u8 pdev_cnt; /* physical device count */
+ u8 local_no; /* local channel number */
+ u8 io_cnt[MAXID]; /* current IO count */
+ u32 address; /* channel address */
+ u32 id_list[MAXID]; /* IDs of the phys. devices */
} raw[MAXBUS]; /* SCSI channels */
struct {
Scsi_Cmnd *cmnd; /* pending request */
- ushort service; /* service */
+ u16 service; /* service */
} cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
struct gdth_cmndinfo { /* per-command private info */
int index;
int internal_command; /* don't call scsi_done */
gdth_cmd_str *internal_cmd_str; /* carrier for internal messages */
dma_addr_t sense_paddr; /* sense dma-addr */
- unchar priority;
+ u8 priority;
int timeout_count; /* # of timeout calls */
volatile int wait_for_completion;
- ushort status;
- ulong32 info;
+ u16 status;
+ u32 info;
enum dma_data_direction dma_dir;
int phase; /* ???? */
int OpCode;
} cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */
- unchar bus_cnt; /* SCSI bus count */
- unchar tid_cnt; /* Target ID count */
- unchar bus_id[MAXBUS]; /* IOP IDs */
- unchar virt_bus; /* number of virtual bus */
- unchar more_proc; /* more /proc info supported */
- ushort cmd_cnt; /* command count in DPRAM */
- ushort cmd_len; /* length of actual command */
- ushort cmd_offs_dpmem; /* actual offset in DPRAM */
- ushort ic_all_size; /* sizeof DPRAM interf. area */
+ u8 bus_cnt; /* SCSI bus count */
+ u8 tid_cnt; /* Target ID count */
+ u8 bus_id[MAXBUS]; /* IOP IDs */
+ u8 virt_bus; /* number of virtual bus */
+ u8 more_proc; /* more /proc info supported */
+ u16 cmd_cnt; /* command count in DPRAM */
+ u16 cmd_len; /* length of actual command */
+ u16 cmd_offs_dpmem; /* actual offset in DPRAM */
+ u16 ic_all_size; /* sizeof DPRAM interf. area */
gdth_cpar_str cpar; /* controller cache par. */
gdth_bfeat_str bfeat; /* controller features */
gdth_binfo_str binfo; /* controller info */
@@ -941,7 +941,7 @@ typedef struct {
struct pci_dev *pdev;
char oem_name[8];
#ifdef GDTH_DMA_STATISTICS
- ulong dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
+ unsigned long dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
#endif
struct scsi_device *sdev;
} gdth_ha_str;
@@ -953,65 +953,65 @@ static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd* cmd)
/* INQUIRY data format */
typedef struct {
- unchar type_qual;
- unchar modif_rmb;
- unchar version;
- unchar resp_aenc;
- unchar add_length;
- unchar reserved1;
- unchar reserved2;
- unchar misc;
- unchar vendor[8];
- unchar product[16];
- unchar revision[4];
-} PACKED gdth_inq_data;
+ u8 type_qual;
+ u8 modif_rmb;
+ u8 version;
+ u8 resp_aenc;
+ u8 add_length;
+ u8 reserved1;
+ u8 reserved2;
+ u8 misc;
+ u8 vendor[8];
+ u8 product[16];
+ u8 revision[4];
+} __attribute__((packed)) gdth_inq_data;
/* READ_CAPACITY data format */
typedef struct {
- ulong32 last_block_no;
- ulong32 block_length;
-} PACKED gdth_rdcap_data;
+ u32 last_block_no;
+ u32 block_length;
+} __attribute__((packed)) gdth_rdcap_data;
/* READ_CAPACITY (16) data format */
typedef struct {
- ulong64 last_block_no;
- ulong32 block_length;
-} PACKED gdth_rdcap16_data;
+ u64 last_block_no;
+ u32 block_length;
+} __attribute__((packed)) gdth_rdcap16_data;
/* REQUEST_SENSE data format */
typedef struct {
- unchar errorcode;
- unchar segno;
- unchar key;
- ulong32 info;
- unchar add_length;
- ulong32 cmd_info;
- unchar adsc;
- unchar adsq;
- unchar fruc;
- unchar key_spec[3];
-} PACKED gdth_sense_data;
+ u8 errorcode;
+ u8 segno;
+ u8 key;
+ u32 info;
+ u8 add_length;
+ u32 cmd_info;
+ u8 adsc;
+ u8 adsq;
+ u8 fruc;
+ u8 key_spec[3];
+} __attribute__((packed)) gdth_sense_data;
/* MODE_SENSE data format */
typedef struct {
struct {
- unchar data_length;
- unchar med_type;
- unchar dev_par;
- unchar bd_length;
- } PACKED hd;
+ u8 data_length;
+ u8 med_type;
+ u8 dev_par;
+ u8 bd_length;
+ } __attribute__((packed)) hd;
struct {
- unchar dens_code;
- unchar block_count[3];
- unchar reserved;
- unchar block_length[3];
- } PACKED bd;
-} PACKED gdth_modep_data;
+ u8 dens_code;
+ u8 block_count[3];
+ u8 reserved;
+ u8 block_length[3];
+ } __attribute__((packed)) bd;
+} __attribute__((packed)) gdth_modep_data;
/* stack frame */
typedef struct {
- ulong b[10]; /* 32/64 bit compiler ! */
-} PACKED gdth_stackframe;
+ unsigned long b[10]; /* 32/64 bit compiler ! */
+} __attribute__((packed)) gdth_stackframe;
/* function prototyping */
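The theme of the gdth changes above: the driver-private unchar/ushort/ulong32/ulong64 aliases and the PACKED macro give way to the kernel's fixed-width u8/u16/u32/u64 and an explicit __attribute__((packed)). Since these structs mirror DPRAM and on-wire layouts, a build-time size check is a cheap way to verify such a mechanical conversion; a minimal sketch (the helper is hypothetical, sizes computed from the packed members above):

#include <linux/kernel.h>	/* BUILD_BUG_ON() */

static inline void gdth_abi_checks(void)
{
	/* u32 last_block_no + u32 block_length */
	BUILD_BUG_ON(sizeof(gdth_rdcap_data) != 8);
	/* u64 last_block_no + u32 block_length, no padding when packed */
	BUILD_BUG_ON(sizeof(gdth_rdcap16_data) != 12);
}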
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
index 783fae737f17..b004c6165887 100644
--- a/drivers/scsi/gdth_ioctl.h
+++ b/drivers/scsi/gdth_ioctl.h
@@ -32,109 +32,101 @@
#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
#endif
-/* typedefs */
-#ifdef __KERNEL__
-typedef u32 ulong32;
-typedef u64 ulong64;
-#endif
-
-#define PACKED __attribute__((packed))
-
/* scatter/gather element */
typedef struct {
- ulong32 sg_ptr; /* address */
- ulong32 sg_len; /* length */
-} PACKED gdth_sg_str;
+ u32 sg_ptr; /* address */
+ u32 sg_len; /* length */
+} __attribute__((packed)) gdth_sg_str;
/* scatter/gather element - 64bit addresses */
typedef struct {
- ulong64 sg_ptr; /* address */
- ulong32 sg_len; /* length */
-} PACKED gdth_sg64_str;
+ u64 sg_ptr; /* address */
+ u32 sg_len; /* length */
+} __attribute__((packed)) gdth_sg64_str;
/* command structure */
typedef struct {
- ulong32 BoardNode; /* board node (always 0) */
- ulong32 CommandIndex; /* command number */
- ushort OpCode; /* the command (READ,..) */
+ u32 BoardNode; /* board node (always 0) */
+ u32 CommandIndex; /* command number */
+ u16 OpCode; /* the command (READ,..) */
union {
struct {
- ushort DeviceNo; /* number of cache drive */
- ulong32 BlockNo; /* block number */
- ulong32 BlockCnt; /* block count */
- ulong32 DestAddr; /* dest. addr. (if s/g: -1) */
- ulong32 sg_canz; /* s/g element count */
+ u16 DeviceNo; /* number of cache drive */
+ u32 BlockNo; /* block number */
+ u32 BlockCnt; /* block count */
+ u32 DestAddr; /* dest. addr. (if s/g: -1) */
+ u32 sg_canz; /* s/g element count */
gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } PACKED cache; /* cache service cmd. str. */
+ } __attribute__((packed)) cache; /* cache service cmd. str. */
struct {
- ushort DeviceNo; /* number of cache drive */
- ulong64 BlockNo; /* block number */
- ulong32 BlockCnt; /* block count */
- ulong64 DestAddr; /* dest. addr. (if s/g: -1) */
- ulong32 sg_canz; /* s/g element count */
+ u16 DeviceNo; /* number of cache drive */
+ u64 BlockNo; /* block number */
+ u32 BlockCnt; /* block count */
+ u64 DestAddr; /* dest. addr. (if s/g: -1) */
+ u32 sg_canz; /* s/g element count */
gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } PACKED cache64; /* cache service cmd. str. */
+ } __attribute__((packed)) cache64; /* cache service cmd. str. */
struct {
- ushort param_size; /* size of p_param buffer */
- ulong32 subfunc; /* IOCTL function */
- ulong32 channel; /* device */
- ulong64 p_param; /* buffer */
- } PACKED ioctl; /* IOCTL command structure */
+ u16 param_size; /* size of p_param buffer */
+ u32 subfunc; /* IOCTL function */
+ u32 channel; /* device */
+ u64 p_param; /* buffer */
+ } __attribute__((packed)) ioctl; /* IOCTL command structure */
struct {
- ushort reserved;
+ u16 reserved;
union {
struct {
- ulong32 msg_handle; /* message handle */
- ulong64 msg_addr; /* message buffer address */
- } PACKED msg;
- unchar data[12]; /* buffer for rtc data, ... */
+ u32 msg_handle; /* message handle */
+ u64 msg_addr; /* message buffer address */
+ } __attribute__((packed)) msg;
+ u8 data[12]; /* buffer for rtc data, ... */
} su;
- } PACKED screen; /* screen service cmd. str. */
+ } __attribute__((packed)) screen; /* screen service cmd. str. */
struct {
- ushort reserved;
- ulong32 direction; /* data direction */
- ulong32 mdisc_time; /* disc. time (0: no timeout)*/
- ulong32 mcon_time; /* connect time(0: no to.) */
- ulong32 sdata; /* dest. addr. (if s/g: -1) */
- ulong32 sdlen; /* data length (bytes) */
- ulong32 clen; /* SCSI cmd. length(6,10,12) */
- unchar cmd[12]; /* SCSI command */
- unchar target; /* target ID */
- unchar lun; /* LUN */
- unchar bus; /* SCSI bus number */
- unchar priority; /* only 0 used */
- ulong32 sense_len; /* sense data length */
- ulong32 sense_data; /* sense data addr. */
- ulong32 link_p; /* linked cmds (not supp.) */
- ulong32 sg_ranz; /* s/g element count */
+ u16 reserved;
+ u32 direction; /* data direction */
+ u32 mdisc_time; /* disc. time (0: no timeout)*/
+ u32 mcon_time; /* connect time(0: no to.) */
+ u32 sdata; /* dest. addr. (if s/g: -1) */
+ u32 sdlen; /* data length (bytes) */
+ u32 clen; /* SCSI cmd. length(6,10,12) */
+ u8 cmd[12]; /* SCSI command */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 bus; /* SCSI bus number */
+ u8 priority; /* only 0 used */
+ u32 sense_len; /* sense data length */
+ u32 sense_data; /* sense data addr. */
+ u32 link_p; /* linked cmds (not supp.) */
+ u32 sg_ranz; /* s/g element count */
gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } PACKED raw; /* raw service cmd. struct. */
+ } __attribute__((packed)) raw; /* raw service cmd. struct. */
struct {
- ushort reserved;
- ulong32 direction; /* data direction */
- ulong32 mdisc_time; /* disc. time (0: no timeout)*/
- ulong32 mcon_time; /* connect time(0: no to.) */
- ulong64 sdata; /* dest. addr. (if s/g: -1) */
- ulong32 sdlen; /* data length (bytes) */
- ulong32 clen; /* SCSI cmd. length(6,..,16) */
- unchar cmd[16]; /* SCSI command */
- unchar target; /* target ID */
- unchar lun; /* LUN */
- unchar bus; /* SCSI bus number */
- unchar priority; /* only 0 used */
- ulong32 sense_len; /* sense data length */
- ulong64 sense_data; /* sense data addr. */
- ulong32 sg_ranz; /* s/g element count */
+ u16 reserved;
+ u32 direction; /* data direction */
+ u32 mdisc_time; /* disc. time (0: no timeout)*/
+ u32 mcon_time; /* connect time(0: no to.) */
+ u64 sdata; /* dest. addr. (if s/g: -1) */
+ u32 sdlen; /* data length (bytes) */
+ u32 clen; /* SCSI cmd. length(6,..,16) */
+ u8 cmd[16]; /* SCSI command */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 bus; /* SCSI bus number */
+ u8 priority; /* only 0 used */
+ u32 sense_len; /* sense data length */
+ u64 sense_data; /* sense data addr. */
+ u32 sg_ranz; /* s/g element count */
gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } PACKED raw64; /* raw service cmd. struct. */
+ } __attribute__((packed)) raw64; /* raw service cmd. struct. */
} u;
/* additional variables */
- unchar Service; /* controller service */
- unchar reserved;
- ushort Status; /* command result */
- ulong32 Info; /* additional information */
+ u8 Service; /* controller service */
+ u8 reserved;
+ u16 Status; /* command result */
+ u32 Info; /* additional information */
void *RequestBuffer; /* request buffer */
-} PACKED gdth_cmd_str;
+} __attribute__((packed)) gdth_cmd_str;
/* controller event structure */
#define ES_ASYNC 1
@@ -142,129 +134,129 @@ typedef struct {
#define ES_TEST 3
#define ES_SYNC 4
typedef struct {
- ushort size; /* size of structure */
+ u16 size; /* size of structure */
union {
char stream[16];
struct {
- ushort ionode;
- ushort service;
- ulong32 index;
- } PACKED driver;
+ u16 ionode;
+ u16 service;
+ u32 index;
+ } __attribute__((packed)) driver;
struct {
- ushort ionode;
- ushort service;
- ushort status;
- ulong32 info;
- unchar scsi_coord[3];
- } PACKED async;
+ u16 ionode;
+ u16 service;
+ u16 status;
+ u32 info;
+ u8 scsi_coord[3];
+ } __attribute__((packed)) async;
struct {
- ushort ionode;
- ushort service;
- ushort status;
- ulong32 info;
- ushort hostdrive;
- unchar scsi_coord[3];
- unchar sense_key;
- } PACKED sync;
+ u16 ionode;
+ u16 service;
+ u16 status;
+ u32 info;
+ u16 hostdrive;
+ u8 scsi_coord[3];
+ u8 sense_key;
+ } __attribute__((packed)) sync;
struct {
- ulong32 l1, l2, l3, l4;
- } PACKED test;
+ u32 l1, l2, l3, l4;
+ } __attribute__((packed)) test;
} eu;
- ulong32 severity;
- unchar event_string[256];
-} PACKED gdth_evt_data;
+ u32 severity;
+ u8 event_string[256];
+} __attribute__((packed)) gdth_evt_data;
typedef struct {
- ulong32 first_stamp;
- ulong32 last_stamp;
- ushort same_count;
- ushort event_source;
- ushort event_idx;
- unchar application;
- unchar reserved;
+ u32 first_stamp;
+ u32 last_stamp;
+ u16 same_count;
+ u16 event_source;
+ u16 event_idx;
+ u8 application;
+ u8 reserved;
gdth_evt_data event_data;
-} PACKED gdth_evt_str;
+} __attribute__((packed)) gdth_evt_str;
#ifdef GDTH_IOCTL_PROC
/* IOCTL structure (write) */
typedef struct {
- ulong32 magic; /* IOCTL magic */
- ushort ioctl; /* IOCTL */
- ushort ionode; /* controller number */
- ushort service; /* controller service */
- ushort timeout; /* timeout */
+ u32 magic; /* IOCTL magic */
+ u16 ioctl; /* IOCTL */
+ u16 ionode; /* controller number */
+ u16 service; /* controller service */
+ u16 timeout; /* timeout */
union {
struct {
- unchar command[512]; /* controller command */
- unchar data[1]; /* add. data */
+ u8 command[512]; /* controller command */
+ u8 data[1]; /* add. data */
} general;
struct {
- unchar lock; /* lock/unlock */
- unchar drive_cnt; /* drive count */
- ushort drives[MAX_HDRIVES];/* drives */
+ u8 lock; /* lock/unlock */
+ u8 drive_cnt; /* drive count */
+ u16 drives[MAX_HDRIVES];/* drives */
} lockdrv;
struct {
- unchar lock; /* lock/unlock */
- unchar channel; /* channel */
+ u8 lock; /* lock/unlock */
+ u8 channel; /* channel */
} lockchn;
struct {
int erase; /* erase event ? */
int handle;
- unchar evt[EVENT_SIZE]; /* event structure */
+ u8 evt[EVENT_SIZE]; /* event structure */
} event;
struct {
- unchar bus; /* SCSI bus */
- unchar target; /* target ID */
- unchar lun; /* LUN */
- unchar cmd_len; /* command length */
- unchar cmd[12]; /* SCSI command */
+ u8 bus; /* SCSI bus */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 cmd_len; /* command length */
+ u8 cmd[12]; /* SCSI command */
} scsi;
struct {
- ushort hdr_no; /* host drive number */
- unchar flag; /* old meth./add/remove */
+ u16 hdr_no; /* host drive number */
+ u8 flag; /* old meth./add/remove */
} rescan;
} iu;
} gdth_iowr_str;
/* IOCTL structure (read) */
typedef struct {
- ulong32 size; /* buffer size */
- ulong32 status; /* IOCTL error code */
+ u32 size; /* buffer size */
+ u32 status; /* IOCTL error code */
union {
struct {
- unchar data[1]; /* data */
+ u8 data[1]; /* data */
} general;
struct {
- ushort version; /* driver version */
+ u16 version; /* driver version */
} drvers;
struct {
- unchar type; /* controller type */
- ushort info; /* slot etc. */
- ushort oem_id; /* OEM ID */
- ushort bios_ver; /* not used */
- ushort access; /* not used */
- ushort ext_type; /* extended type */
- ushort device_id; /* device ID */
- ushort sub_device_id; /* sub device ID */
+ u8 type; /* controller type */
+ u16 info; /* slot etc. */
+ u16 oem_id; /* OEM ID */
+ u16 bios_ver; /* not used */
+ u16 access; /* not used */
+ u16 ext_type; /* extended type */
+ u16 device_id; /* device ID */
+ u16 sub_device_id; /* sub device ID */
} ctrtype;
struct {
- unchar version; /* OS version */
- unchar subversion; /* OS subversion */
- ushort revision; /* revision */
+ u8 version; /* OS version */
+ u8 subversion; /* OS subversion */
+ u16 revision; /* revision */
} osvers;
struct {
- ushort count; /* controller count */
+ u16 count; /* controller count */
} ctrcnt;
struct {
int handle;
- unchar evt[EVENT_SIZE]; /* event structure */
+ u8 evt[EVENT_SIZE]; /* event structure */
} event;
struct {
- unchar bus; /* SCSI bus, 0xff: invalid */
- unchar target; /* target ID */
- unchar lun; /* LUN */
- unchar cluster_type; /* cluster properties */
+ u8 bus; /* SCSI bus, 0xff: invalid */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 cluster_type; /* cluster properties */
} hdr_list[MAX_HDRIVES]; /* index is host drive number */
} iu;
} gdth_iord_str;
@@ -272,53 +264,53 @@ typedef struct {
/* GDTIOCTL_GENERAL */
typedef struct {
- ushort ionode; /* controller number */
- ushort timeout; /* timeout */
- ulong32 info; /* error info */
- ushort status; /* status */
- ulong data_len; /* data buffer size */
- ulong sense_len; /* sense buffer size */
+ u16 ionode; /* controller number */
+ u16 timeout; /* timeout */
+ u32 info; /* error info */
+ u16 status; /* status */
+ unsigned long data_len; /* data buffer size */
+ unsigned long sense_len; /* sense buffer size */
gdth_cmd_str command; /* command */
} gdth_ioctl_general;
/* GDTIOCTL_LOCKDRV */
typedef struct {
- ushort ionode; /* controller number */
- unchar lock; /* lock/unlock */
- unchar drive_cnt; /* drive count */
- ushort drives[MAX_HDRIVES]; /* drives */
+ u16 ionode; /* controller number */
+ u8 lock; /* lock/unlock */
+ u8 drive_cnt; /* drive count */
+ u16 drives[MAX_HDRIVES]; /* drives */
} gdth_ioctl_lockdrv;
/* GDTIOCTL_LOCKCHN */
typedef struct {
- ushort ionode; /* controller number */
- unchar lock; /* lock/unlock */
- unchar channel; /* channel */
+ u16 ionode; /* controller number */
+ u8 lock; /* lock/unlock */
+ u8 channel; /* channel */
} gdth_ioctl_lockchn;
/* GDTIOCTL_OSVERS */
typedef struct {
- unchar version; /* OS version */
- unchar subversion; /* OS subversion */
- ushort revision; /* revision */
+ u8 version; /* OS version */
+ u8 subversion; /* OS subversion */
+ u16 revision; /* revision */
} gdth_ioctl_osvers;
/* GDTIOCTL_CTRTYPE */
typedef struct {
- ushort ionode; /* controller number */
- unchar type; /* controller type */
- ushort info; /* slot etc. */
- ushort oem_id; /* OEM ID */
- ushort bios_ver; /* not used */
- ushort access; /* not used */
- ushort ext_type; /* extended type */
- ushort device_id; /* device ID */
- ushort sub_device_id; /* sub device ID */
+ u16 ionode; /* controller number */
+ u8 type; /* controller type */
+ u16 info; /* slot etc. */
+ u16 oem_id; /* OEM ID */
+ u16 bios_ver; /* not used */
+ u16 access; /* not used */
+ u16 ext_type; /* extended type */
+ u16 device_id; /* device ID */
+ u16 sub_device_id; /* sub device ID */
} gdth_ioctl_ctrtype;
/* GDTIOCTL_EVENT */
typedef struct {
- ushort ionode;
+ u16 ionode;
int erase; /* erase event? */
int handle; /* event handle */
gdth_evt_str event;
@@ -326,22 +318,22 @@ typedef struct {
/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
typedef struct {
- ushort ionode; /* controller number */
- unchar flag; /* add/remove */
- ushort hdr_no; /* drive no. */
+ u16 ionode; /* controller number */
+ u8 flag; /* add/remove */
+ u16 hdr_no; /* drive no. */
struct {
- unchar bus; /* SCSI bus */
- unchar target; /* target ID */
- unchar lun; /* LUN */
- unchar cluster_type; /* cluster properties */
+ u8 bus; /* SCSI bus */
+ u8 target; /* target ID */
+ u8 lun; /* LUN */
+ u8 cluster_type; /* cluster properties */
} hdr_list[MAX_HDRIVES]; /* index is host drive number */
} gdth_ioctl_rescan;
/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
typedef struct {
- ushort ionode; /* controller number */
- ushort number; /* bus/host drive number */
- ushort status; /* status */
+ u16 ionode; /* controller number */
+ u16 number; /* bus/host drive number */
+ u16 status; /* status */
} gdth_ioctl_reset;
#endif
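One caveat the conversion deliberately leaves in place: gdth_ioctl_general above still carries unsigned long data_len/sense_len, so its layout differs between 32-bit and 64-bit userspace, unlike the fixed-width fields around it. A hypothetical standalone program illustrating the size difference (not driver code):

#include <stdio.h>

struct demo {
	unsigned short ionode;	/* u16: same size on every arch */
	unsigned long data_len;	/* 4 bytes on ILP32, 8 on LP64 */
};

int main(void)
{
	/* Typically prints 8 on 32-bit ABIs and 16 on LP64
	 * (alignment pads ionode out to the long's size). */
	printf("%zu\n", sizeof(struct demo));
	return 0;
}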
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 1258da34fbc2..ffb2b21992ba 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -43,7 +43,7 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
int i, found;
gdth_cmd_str gdtcmd;
gdth_cpar_str *pcpar;
- ulong64 paddr;
+ u64 paddr;
char cmnd[MAX_COMMAND_SIZE];
memset(cmnd, 0xff, 12);
@@ -156,8 +156,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
off_t begin = 0,pos = 0;
int id, i, j, k, sec, flag;
int no_mdrv = 0, drv_no, is_mirr;
- ulong32 cnt;
- ulong64 paddr;
+ u32 cnt;
+ u64 paddr;
int rc = -ENOMEM;
gdth_cmd_str *gdtcmd;
@@ -220,14 +220,14 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
if (ha->more_proc)
sprintf(hrec, "%d.%02d.%02d-%c%03X",
- (unchar)(ha->binfo.upd_fw_ver>>24),
- (unchar)(ha->binfo.upd_fw_ver>>16),
- (unchar)(ha->binfo.upd_fw_ver),
+ (u8)(ha->binfo.upd_fw_ver>>24),
+ (u8)(ha->binfo.upd_fw_ver>>16),
+ (u8)(ha->binfo.upd_fw_ver),
ha->bfeat.raid ? 'R':'N',
ha->binfo.upd_revision);
else
- sprintf(hrec, "%d.%02d", (unchar)(ha->cpar.version>>8),
- (unchar)(ha->cpar.version));
+ sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8),
+ (u8)(ha->cpar.version));
size = sprintf(buffer+len,
" Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n",
@@ -281,7 +281,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
pds->bid = ha->raw[i].local_no;
pds->first = 0;
pds->entries = ha->raw[i].pdev_cnt;
- cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(ulong32)) /
+ cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
sizeof(pds->list[0]);
if (pds->entries > cnt)
pds->entries = cnt;
@@ -604,7 +604,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
size = sprintf(buffer+len,
" Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
- (ulong32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
+ (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
len += size; pos = begin + len;
if (pos < offset) {
len = 0;
@@ -664,9 +664,9 @@ free_fail:
}
static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
- ulong64 *paddr)
+ u64 *paddr)
{
- ulong flags;
+ unsigned long flags;
char *ret_val;
if (size == 0)
@@ -691,9 +691,9 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
return ret_val;
}
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
+static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr)
{
- ulong flags;
+ unsigned long flags;
if (buf == ha->pscratch) {
spin_lock_irqsave(&ha->smp_lock, flags);
@@ -705,16 +705,16 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
}
#ifdef GDTH_IOCTL_PROC
-static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size)
+static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size)
{
- ulong flags;
+ unsigned long flags;
int ret_val;
spin_lock_irqsave(&ha->smp_lock, flags);
ret_val = FALSE;
if (ha->scratch_busy) {
- if (((gdth_iord_str *)ha->pscratch)->size == (ulong32)size)
+ if (((gdth_iord_str *)ha->pscratch)->size == (u32)size)
ret_val = TRUE;
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
@@ -724,11 +724,11 @@ static int gdth_ioctl_check_bin(gdth_ha_str *ha, ushort size)
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
{
- ulong flags;
+ unsigned long flags;
int i;
Scsi_Cmnd *scp;
struct gdth_cmndinfo *cmndinfo;
- unchar b, t;
+ u8 b, t;
spin_lock_irqsave(&ha->smp_lock, flags);
@@ -738,8 +738,8 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
b = scp->device->channel;
t = scp->device->id;
- if (!SPECIAL_SCP(scp) && t == (unchar)id &&
- b == (unchar)busnum) {
+ if (!SPECIAL_SCP(scp) && t == (u8)id &&
+ b == (u8)busnum) {
cmndinfo->wait_for_completion = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
while (!cmndinfo->wait_for_completion)
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index 9b900cc9ebe8..dab15f59f2cc 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -17,8 +17,8 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
int length, gdth_ha_str *ha);
static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
- ulong64 *paddr);
-static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
+ u64 *paddr);
+static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr);
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
#endif
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index bb96fdd58e23..03697ba94251 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -52,7 +52,7 @@
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "1.0.0"
+#define HPSA_DRIVER_VERSION "2.0.1-3"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
/* How long to wait (in milliseconds) for board to go into simple mode */
@@ -77,9 +77,6 @@ MODULE_PARM_DESC(hpsa_allow_any,
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
@@ -87,6 +84,9 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
+#define PCI_DEVICE_ID_HP_CISSF 0x333f
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -99,9 +99,6 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
* access = Address of the struct of function pointers
*/
static struct board_type products[] = {
- {0x3223103C, "Smart Array P800", &SA5_access},
- {0x3234103C, "Smart Array P400", &SA5_access},
- {0x323d103c, "Smart Array P700M", &SA5_access},
{0x3241103C, "Smart Array P212", &SA5_access},
{0x3243103C, "Smart Array P410", &SA5_access},
{0x3245103C, "Smart Array P410i", &SA5_access},
@@ -109,6 +106,8 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
+ {0x3233103C, "StorageWorks P1210m", &SA5_access},
+ {0x333F103C, "StorageWorks P1210m", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -126,12 +125,15 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
- void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *));
+static void hpsa_scan_start(struct Scsi_Host *);
+static int hpsa_scan_finished(struct Scsi_Host *sh,
+ unsigned long elapsed_time);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
@@ -150,6 +152,11 @@ static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c);
+/* performant mode helper functions */
+static void calc_bucket_map(int *bucket, int num_buckets,
+ int nsgs, int *bucket_map);
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+static inline u32 next_command(struct ctlr_info *h);
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
@@ -173,10 +180,10 @@ static struct scsi_host_template hpsa_driver_template = {
.name = "hpsa",
.proc_name = "hpsa",
.queuecommand = hpsa_scsi_queue_command,
- .can_queue = 512,
+ .scan_start = hpsa_scan_start,
+ .scan_finished = hpsa_scan_finished,
.this_id = -1,
.sg_tablesize = MAXSGENTRIES,
- .cmd_per_lun = 512,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
@@ -195,6 +202,12 @@ static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
return (struct ctlr_info *) *priv;
}
+static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
+{
+ unsigned long *priv = shost_priv(sh);
+ return (struct ctlr_info *) *priv;
+}
+
static struct task_struct *hpsa_scan_thread;
static DEFINE_MUTEX(hpsa_scan_mutex);
static LIST_HEAD(hpsa_scan_q);
@@ -312,7 +325,7 @@ static int hpsa_scan_func(__attribute__((unused)) void *data)
h->busy_scanning = 1;
mutex_unlock(&hpsa_scan_mutex);
host_no = h->scsi_host ? h->scsi_host->host_no : -1;
- hpsa_update_scsi_devices(h, host_no);
+ hpsa_scan_start(h->scsi_host);
complete_all(&h->scan_wait);
mutex_lock(&hpsa_scan_mutex);
h->busy_scanning = 0;
@@ -379,8 +392,7 @@ static ssize_t host_store_rescan(struct device *dev,
{
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- unsigned long *priv = shost_priv(shost);
- h = (struct ctlr_info *) *priv;
+ h = shost_to_hba(shost);
if (add_to_scan_list(h)) {
wake_up_process(hpsa_scan_thread);
wait_for_completion_interruptible(&h->scan_wait);
@@ -394,10 +406,44 @@ static inline void addQ(struct hlist_head *list, struct CommandList *c)
hlist_add_head(&c->list, list);
}
+static inline u32 next_command(struct ctlr_info *h)
+{
+ u32 a;
+
+ if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+ return h->access.command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ a = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+ return a;
+}
+
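next_command() consumes the reply ring without ever clearing consumed slots: bit 0 of each entry is a producer-written parity that matches reply_pool_wraparound only for entries written during the current pass over the ring. A minimal standalone model of the trick (names hypothetical, not driver code):

#include <linux/types.h>

struct reply_ring {
	u32 *head, *base;
	unsigned int size;
	u32 wrap;			/* flips 0/1 on every full pass */
};

static u32 reply_ring_pop(struct reply_ring *r, u32 empty)
{
	u32 v = empty;

	if ((*r->head & 1) == r->wrap)	/* written during this pass? */
		v = *r->head++;
	if (r->head == r->base + r->size) {
		r->head = r->base;	/* wrapped: flip expected parity */
		r->wrap ^= 1;
	}
	return v;
}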
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+ if (likely(h->transMethod == CFGTBL_Trans_Performant))
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
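Worked example of the tag bits set above (the table value is illustrative):

/*
 * busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 *
 *   bit 0    = 1  -> performant ("pull") completion model
 *   bits 3-1      -> block-fetch register index for this SG count
 *
 * e.g. blockFetchTable[SGList] == 3 yields 1 | (3 << 1) == 0x07
 */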
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
struct CommandList *c)
{
unsigned long flags;
+
+ set_performant_mode(h, c);
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
h->Qdepth++;
@@ -422,6 +468,15 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
return (scsi3addr[3] & 0xC0) == 0x40;
}
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+ if (!h->hba_inquiry_data)
+ return 0;
+ if ((h->hba_inquiry_data[2] & 0x07) == 5)
+ return 1;
+ return 0;
+}
+
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"UNKNOWN"
};
@@ -431,7 +486,7 @@ static ssize_t raid_level_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t l = 0;
- int rlevel;
+ unsigned char rlevel;
struct ctlr_info *h;
struct scsi_device *sdev;
struct hpsa_scsi_dev_t *hdev;
@@ -455,7 +510,7 @@ static ssize_t raid_level_show(struct device *dev,
rlevel = hdev->raid_level;
spin_unlock_irqrestore(&h->lock, flags);
- if (rlevel < 0 || rlevel > RAID_UNKNOWN)
+ if (rlevel > RAID_UNKNOWN)
rlevel = RAID_UNKNOWN;
l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
return l;
@@ -620,6 +675,24 @@ lun_assigned:
return 0;
}
+/* Replace an entry from h->dev[] array. */
+static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
+ int entry, struct hpsa_scsi_dev_t *new_entry,
+ struct hpsa_scsi_dev_t *added[], int *nadded,
+ struct hpsa_scsi_dev_t *removed[], int *nremoved)
+{
+ /* assumes h->devlock is held */
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
+ removed[*nremoved] = h->dev[entry];
+ (*nremoved)++;
+ h->dev[entry] = new_entry;
+ added[*nadded] = new_entry;
+ (*nadded)++;
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
+ scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
+ new_entry->target, new_entry->lun);
+}
+
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
struct hpsa_scsi_dev_t *removed[], int *nremoved)
@@ -628,8 +701,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
int i;
struct hpsa_scsi_dev_t *sd;
- if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
- BUG();
+ BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
sd = h->dev[entry];
removed[*nremoved] = h->dev[entry];
@@ -722,6 +794,8 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
for (i = 0; i < haystack_size; i++) {
+ if (haystack[i] == NULL) /* previously removed. */
+ continue;
if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
*index = i;
if (device_is_the_same(needle, haystack[i]))
@@ -734,7 +808,7 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
return DEVICE_NOT_FOUND;
}
-static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *sd[], int nsds)
{
/* sd contains scsi3 addresses and devtypes, and inquiry
@@ -779,12 +853,12 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
continue; /* remove ^^^, hence i not incremented */
} else if (device_change == DEVICE_CHANGED) {
changes++;
- hpsa_scsi_remove_entry(h, hostno, i,
- removed, &nremoved);
- (void) hpsa_scsi_add_entry(h, hostno, sd[entry],
- added, &nadded);
- /* add can't fail, we just removed one. */
- sd[entry] = NULL; /* prevent it from being freed */
+ hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
+ added, &nadded, removed, &nremoved);
+ /* Set it to NULL to prevent it from being freed
+ * at the bottom of hpsa_update_scsi_devices()
+ */
+ sd[entry] = NULL;
}
i++;
}
@@ -860,7 +934,6 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
free_and_out:
kfree(added);
kfree(removed);
- return 0;
}
/*
@@ -900,7 +973,7 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
static void hpsa_slave_destroy(struct scsi_device *sdev)
{
- return; /* nothing to do. */
+ /* nothing to do. */
}
static void hpsa_scsi_setup(struct ctlr_info *h)
@@ -908,11 +981,10 @@ static void hpsa_scsi_setup(struct ctlr_info *h)
h->ndevices = 0;
h->scsi_host = NULL;
spin_lock_init(&h->devlock);
- return;
}
static void complete_scsi_command(struct CommandList *cp,
- int timeout, __u32 tag)
+ int timeout, u32 tag)
{
struct scsi_cmnd *cmd;
struct ctlr_info *h;
@@ -987,7 +1059,6 @@ static void complete_scsi_command(struct CommandList *cp,
* required
*/
if ((asc == 0x04) && (ascq == 0x03)) {
- cmd->result = DID_NO_CONNECT << 16;
dev_warn(&h->pdev->dev, "cp %p "
"has check condition: unit "
"not ready, manual "
@@ -995,14 +1066,22 @@ static void complete_scsi_command(struct CommandList *cp,
break;
}
}
-
-
+ if (sense_key == ABORTED_COMMAND) {
+ /* Aborted command is retryable */
+ dev_warn(&h->pdev->dev, "cp %p "
+ "has check condition: aborted command: "
+ "ASC: 0x%x, ASCQ: 0x%x\n",
+ cp, asc, ascq);
+ cmd->result = DID_SOFT_ERROR << 16;
+ break;
+ }
/* Must be some other type of check condition */
dev_warn(&h->pdev->dev, "cp %p has check condition: "
"unknown type: "
"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
"Returning result: 0x%x, "
"cmd=[%02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x]\n",
cp, sense_key, asc, ascq,
cmd->result,
@@ -1010,7 +1089,10 @@ static void complete_scsi_command(struct CommandList *cp,
cmd->cmnd[2], cmd->cmnd[3],
cmd->cmnd[4], cmd->cmnd[5],
cmd->cmnd[6], cmd->cmnd[7],
- cmd->cmnd[8], cmd->cmnd[9]);
+ cmd->cmnd[8], cmd->cmnd[9],
+ cmd->cmnd[10], cmd->cmnd[11],
+ cmd->cmnd[12], cmd->cmnd[13],
+ cmd->cmnd[14], cmd->cmnd[15]);
break;
}
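For reference, the DID_* code chosen above lands in the host byte of cmd->result, which the midlayer extracts with host_byte() from include/scsi/scsi.h; DID_SOFT_ERROR is a retryable disposition, where the old DID_NO_CONNECT mapping failed the command outright:

/* from include/scsi/scsi.h */
#define host_byte(result)	(((result) >> 16) & 0xff)

/* host_byte(DID_SOFT_ERROR << 16) == DID_SOFT_ERROR, so an aborted
 * command is re-queued instead of completed with an error. */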
@@ -1086,7 +1168,7 @@ static void complete_scsi_command(struct CommandList *cp,
dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
break;
case CMD_UNSOLICITED_ABORT:
- cmd->result = DID_ABORT << 16;
+ cmd->result = DID_RESET << 16;
dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
"abort\n", cp);
break;
@@ -1119,9 +1201,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_lun = HPSA_MAX_LUN;
sh->max_id = HPSA_MAX_LUN;
+ sh->can_queue = h->nr_cmds;
+ sh->cmd_per_lun = h->nr_cmds;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[SIMPLE_MODE_INT];
+ sh->irq = h->intr[PERF_MODE_INT];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
@@ -1133,11 +1217,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
" failed for controller %d\n", h->ctlr);
scsi_host_put(sh);
- return -1;
+ return error;
fail:
dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
" failed for controller %d\n", h->ctlr);
- return -1;
+ return -ENOMEM;
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -1160,7 +1244,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
size_t buflen,
int data_direction)
{
- __u64 addr64;
+ u64 addr64;
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
@@ -1168,14 +1252,14 @@ static void hpsa_map_one(struct pci_dev *pdev,
return;
}
- addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+ addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
cp->SG[0].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Len = buflen;
- cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+ cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
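hpsa_map_one() above splits the 64-bit bus address into the controller's lower/upper register pair; the same split, as a self-contained sketch (helper name hypothetical):

#include <linux/types.h>

struct sg_addr {
	u32 lower;
	u32 upper;
};

static inline void split_addr64(struct sg_addr *a, u64 addr64)
{
	a->lower = (u32)(addr64 & 0xffffffffULL);	/* bits 31..0 */
	a->upper = (u32)(addr64 >> 32);			/* bits 63..32 */
}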
@@ -1274,7 +1358,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
if (c == NULL) { /* trouble... */
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
- return -1;
+ return -ENOMEM;
}
fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
@@ -1366,9 +1450,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
return -1;
}
-
- memset(&scsi3addr[0], 0, 8); /* address the controller */
-
+ /* address the controller */
+ memset(scsi3addr, 0, sizeof(scsi3addr));
fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
buf, bufsize, 0, scsi3addr, TYPE_CMD);
if (extended_response)
@@ -1409,13 +1492,12 @@ static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
{
#define OBDR_TAPE_INQ_SIZE 49
- unsigned char *inq_buff = NULL;
+ unsigned char *inq_buff;
- inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
if (!inq_buff)
goto bail_out;
- memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
/* Do an inquiry to the device to see what it is. */
if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
@@ -1485,32 +1567,51 @@ static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
* in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
*/
static void figure_bus_target_lun(struct ctlr_info *h,
- __u8 *lunaddrbytes, int *bus, int *target, int *lun,
+ u8 *lunaddrbytes, int *bus, int *target, int *lun,
struct hpsa_scsi_dev_t *device)
{
-
- __u32 lunid;
+ u32 lunid;
if (is_logical_dev_addr_mode(lunaddrbytes)) {
/* logical device */
- memcpy(&lunid, lunaddrbytes, sizeof(lunid));
- lunid = le32_to_cpu(lunid);
-
- if (is_msa2xxx(h, device)) {
- *bus = 1;
- *target = (lunid >> 16) & 0x3fff;
- *lun = lunid & 0x00ff;
- } else {
+ if (unlikely(is_scsi_rev_5(h))) {
+ /* p1210m, logical drives lun assignments
+ * match SCSI REPORT LUNS data.
+ */
+ lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
*bus = 0;
- *lun = 0;
- *target = lunid & 0x3fff;
+ *target = 0;
+ *lun = (lunid & 0x3fff) + 1;
+ } else {
+ /* not p1210m... */
+ lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+ if (is_msa2xxx(h, device)) {
+ /* msa2xxx way, put logicals on bus 1
+ * and match target/lun numbers box
+ * reports.
+ */
+ *bus = 1;
+ *target = (lunid >> 16) & 0x3fff;
+ *lun = lunid & 0x00ff;
+ } else {
+ /* Traditional smart array way. */
+ *bus = 0;
+ *lun = 0;
+ *target = lunid & 0x3fff;
+ }
}
} else {
/* physical device */
if (is_hba_lunid(lunaddrbytes))
- *bus = 3;
+ if (unlikely(is_scsi_rev_5(h))) {
+ *bus = 0; /* put p1210m ctlr at 0,0,0 */
+ *target = 0;
+ *lun = 0;
+ return;
+ } else
+ *bus = 3; /* traditional smartarray */
else
- *bus = 2;
+ *bus = 2; /* physical disk */
*target = -1;
*lun = -1; /* we will fill these in later. */
}
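The msa2xxx decode above, worked through for one illustrative lunid:

/*
 * lunid = 0x00120005 (example value)
 *   bus    = 1
 *   target = (lunid >> 16) & 0x3fff = 0x12	(18)
 *   lun    =  lunid        & 0x00ff = 0x05
 *
 * A SCSI rev-5 (p1210m) controller maps the same lunid to
 * bus 0, target 0, lun (lunid & 0x3fff) + 1 = 6 instead.
 */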
@@ -1529,7 +1630,7 @@ static void figure_bus_target_lun(struct ctlr_info *h,
*/
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
struct hpsa_scsi_dev_t *tmpdevice,
- struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
+ struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
int bus, int target, int lun, unsigned long lunzerobits[],
int *nmsa2xxx_enclosures)
{
@@ -1550,6 +1651,9 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
+ if (is_scsi_rev_5(h))
+ return 0; /* p1210m doesn't need to do this. */
+
#define MAX_MSA2XXX_ENCLOSURES 32
if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
@@ -1576,18 +1680,14 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
*/
static int hpsa_gather_lun_info(struct ctlr_info *h,
int reportlunsize,
- struct ReportLUNdata *physdev, __u32 *nphysicals,
- struct ReportLUNdata *logdev, __u32 *nlogicals)
+ struct ReportLUNdata *physdev, u32 *nphysicals,
+ struct ReportLUNdata *logdev, u32 *nlogicals)
{
if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
return -1;
}
- memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
- *nphysicals = be32_to_cpu(*nphysicals) / 8;
-#ifdef DEBUG
- dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
-#endif
+ *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
if (*nphysicals > HPSA_MAX_PHYS_LUN) {
dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1598,11 +1698,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
return -1;
}
- memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
- *nlogicals = be32_to_cpu(*nlogicals) / 8;
-#ifdef DEBUG
- dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
-#endif
+ *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
/* Reject Logicals in excess of our max capability. */
if (*nlogicals > HPSA_MAX_LUN) {
dev_warn(&h->pdev->dev,
@@ -1621,6 +1717,31 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
return 0;
}
+u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
+ int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
+ struct ReportLUNdata *logdev_list)
+{
+ /* Helper function, figure out where the LUN ID info is coming from
+ * given index i, lists of physical and logical devices, where in
+ * the list the raid controller is supposed to appear (first or last)
+ */
+
+ int logicals_start = nphysicals + (raid_ctlr_position == 0);
+ int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
+
+ if (i == raid_ctlr_position)
+ return RAID_CTLR_LUNID;
+
+ if (i < logicals_start)
+ return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
+
+ if (i < last_device)
+ return &logdev_list->LUN[i - nphysicals -
+ (raid_ctlr_position == 0)][0];
+ BUG();
+ return NULL;
+}
+
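figure_lunaddrbytes() folds the three-way branch formerly inlined in hpsa_update_scsi_devices() into plain index arithmetic; the resulting layout, spelled out:

/*
 * With raid_ctlr_position == 0 (rev-5 controllers):
 *   i == 0                      -> RAID_CTLR_LUNID
 *   1 .. nphysicals             -> physdev_list->LUN[i - 1]
 *   nphysicals+1 .. last_device -> logdev_list->LUN[i - nphysicals - 1]
 *
 * With raid_ctlr_position == nphysicals + nlogicals the controller
 * entry moves to the end and the "- 1" offsets drop out.
 */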
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
/* the idea here is we could get notified
@@ -1636,14 +1757,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
struct ReportLUNdata *physdev_list = NULL;
struct ReportLUNdata *logdev_list = NULL;
unsigned char *inq_buff = NULL;
- __u32 nphysicals = 0;
- __u32 nlogicals = 0;
- __u32 ndev_allocated = 0;
+ u32 nphysicals = 0;
+ u32 nlogicals = 0;
+ u32 ndev_allocated = 0;
struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
int ncurrent = 0;
int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
int i, nmsa2xxx_enclosures, ndevs_to_allocate;
int bus, target, lun;
+ int raid_ctlr_position;
DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
@@ -1681,23 +1803,22 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
ndev_allocated++;
}
+ if (unlikely(is_scsi_rev_5(h)))
+ raid_ctlr_position = 0;
+ else
+ raid_ctlr_position = nphysicals + nlogicals;
+
/* adjust our table of devices */
nmsa2xxx_enclosures = 0;
for (i = 0; i < nphysicals + nlogicals + 1; i++) {
- __u8 *lunaddrbytes;
+ u8 *lunaddrbytes;
/* Figure out where the LUN ID info is coming from */
- if (i < nphysicals)
- lunaddrbytes = &physdev_list->LUN[i][0];
- else
- if (i < nphysicals + nlogicals)
- lunaddrbytes =
- &logdev_list->LUN[i-nphysicals][0];
- else /* jam in the RAID controller at the end */
- lunaddrbytes = RAID_CTLR_LUNID;
-
+ lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
+ i, nphysicals, nlogicals, physdev_list, logdev_list);
/* skip masked physical devices. */
- if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
+ if (lunaddrbytes[3] & 0xC0 &&
+ i < nphysicals + (raid_ctlr_position == 0))
continue;
/* Get device type, vendor, model, device id */
@@ -1777,7 +1898,6 @@ out:
kfree(inq_buff);
kfree(physdev_list);
kfree(logdev_list);
- return;
}
/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
@@ -1790,7 +1910,7 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
{
unsigned int len;
struct scatterlist *sg;
- __u64 addr64;
+ u64 addr64;
int use_sg, i;
BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
@@ -1803,20 +1923,20 @@ static int hpsa_scatter_gather(struct pci_dev *pdev,
goto sglist_finished;
scsi_for_each_sg(cmd, sg, use_sg, i) {
- addr64 = (__u64) sg_dma_address(sg);
+ addr64 = (u64) sg_dma_address(sg);
len = sg_dma_len(sg);
cp->SG[i].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[i].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
cp->SG[i].Len = len;
cp->SG[i].Ext = 0; /* we are not chaining */
}
sglist_finished:
- cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
- cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
+ cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
return 0;
}
@@ -1860,7 +1980,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
c->scsi_cmd = cmd;
c->Header.ReplyQueue = 0; /* unused in simple mode */
memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
- c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */
+ c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
+ c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
/* Fill in the request block... */
@@ -1914,6 +2035,48 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
return 0;
}
+static void hpsa_scan_start(struct Scsi_Host *sh)
+{
+ struct ctlr_info *h = shost_to_hba(sh);
+ unsigned long flags;
+
+ /* wait until any scan already in progress is finished. */
+ while (1) {
+ spin_lock_irqsave(&h->scan_lock, flags);
+ if (h->scan_finished)
+ break;
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ wait_event(h->scan_wait_queue, h->scan_finished);
+ /* Note: We don't need to worry about a race between this
+ * thread and driver unload because the midlayer will
+ * have incremented the reference count, so unload won't
+ * happen if we're in here.
+ */
+ }
+ h->scan_finished = 0; /* mark scan as in progress */
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+
+ hpsa_update_scsi_devices(h, h->scsi_host->host_no);
+
+ spin_lock_irqsave(&h->scan_lock, flags);
+ h->scan_finished = 1; /* mark scan as finished. */
+ wake_up_all(&h->scan_wait_queue);
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+}
+
+static int hpsa_scan_finished(struct Scsi_Host *sh,
+ unsigned long elapsed_time)
+{
+ struct ctlr_info *h = shost_to_hba(sh);
+ unsigned long flags;
+ int finished;
+
+ spin_lock_irqsave(&h->scan_lock, flags);
+ finished = h->scan_finished;
+ spin_unlock_irqrestore(&h->scan_lock, flags);
+ return finished;
+}
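
These two callbacks implement the scsi_host_template scan_start/scan_finished
interface: the midlayer kicks the scan off asynchronously and then polls for
completion. A sketch of the assumed caller-side flow (simplified; the scan
code in drivers/scsi/scsi_scan.c behaves roughly like this):

	/* scsi midlayer, roughly: */
	unsigned long start = jiffies;

	shost->hostt->scan_start(shost);
	while (!shost->hostt->scan_finished(shost, jiffies - start))
		msleep(10);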
+
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
/* we are being forcibly unloaded, and may not refuse. */
@@ -1926,7 +2089,6 @@ static int hpsa_register_scsi(struct ctlr_info *h)
{
int rc;
- hpsa_update_scsi_devices(h, -1);
rc = hpsa_scsi_detect(h);
if (rc != 0)
dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
@@ -2003,14 +2165,14 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
h = sdev_to_hba(scsicmd->device);
if (h == NULL) /* paranoia */
return FAILED;
- dev_warn(&h->pdev->dev, "resetting drive\n");
-
dev = scsicmd->device->hostdata;
if (!dev) {
dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
"device lookup failed.\n");
return FAILED;
}
+ dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
+ h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
/* send a reset to the SCSI LUN which the command was sent to */
rc = hpsa_send_reset(h, dev->scsi3addr);
if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
@@ -2053,8 +2215,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
c->cmdindex = i;
INIT_HLIST_NODE(&c->list);
- c->busaddr = (__u32) cmd_dma_handle;
- temp64.val = (__u64) err_dma_handle;
+ c->busaddr = (u32) cmd_dma_handle;
+ temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
c->ErrDesc.Addr.upper = temp64.val32.upper;
c->ErrDesc.Len = sizeof(*c->err_info);
@@ -2091,8 +2253,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
memset(c->err_info, 0, sizeof(*c->err_info));
INIT_HLIST_NODE(&c->list);
- c->busaddr = (__u32) cmd_dma_handle;
- temp64.val = (__u64) err_dma_handle;
+ c->busaddr = (u32) cmd_dma_handle;
+ temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
c->ErrDesc.Addr.upper = temp64.val32.upper;
c->ErrDesc.Len = sizeof(*c->err_info);
@@ -2125,50 +2287,6 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
#ifdef CONFIG_COMPAT
-static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
-{
- int ret;
-
- lock_kernel();
- ret = hpsa_ioctl(dev, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
-static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
-static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
- int cmd, void *arg);
-
-static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
-{
- switch (cmd) {
- case CCISS_GETPCIINFO:
- case CCISS_GETINTINFO:
- case CCISS_SETINTINFO:
- case CCISS_GETNODENAME:
- case CCISS_SETNODENAME:
- case CCISS_GETHEARTBEAT:
- case CCISS_GETBUSTYPES:
- case CCISS_GETFIRMVER:
- case CCISS_GETDRIVVER:
- case CCISS_REVALIDVOLS:
- case CCISS_DEREGDISK:
- case CCISS_REGNEWDISK:
- case CCISS_REGNEWD:
- case CCISS_RESCANDISK:
- case CCISS_GETLUNINFO:
- return do_ioctl(dev, cmd, arg);
-
- case CCISS_PASSTHRU32:
- return hpsa_ioctl32_passthru(dev, cmd, arg);
- case CCISS_BIG_PASSTHRU32:
- return hpsa_ioctl32_big_passthru(dev, cmd, arg);
-
- default:
- return -ENOIOCTLCMD;
- }
-}
-
static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
IOCTL32_Command_struct __user *arg32 =
@@ -2193,7 +2311,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
if (err)
return -EFAULT;
- err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
+ err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
if (err)
return err;
err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -2230,7 +2348,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
if (err)
return -EFAULT;
- err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
+ err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
if (err)
return err;
err |= copy_in_user(&arg32->error_info, &p->error_info,
@@ -2239,6 +2357,36 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
return -EFAULT;
return err;
}
+
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+ switch (cmd) {
+ case CCISS_GETPCIINFO:
+ case CCISS_GETINTINFO:
+ case CCISS_SETINTINFO:
+ case CCISS_GETNODENAME:
+ case CCISS_SETNODENAME:
+ case CCISS_GETHEARTBEAT:
+ case CCISS_GETBUSTYPES:
+ case CCISS_GETFIRMVER:
+ case CCISS_GETDRIVVER:
+ case CCISS_REVALIDVOLS:
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ case CCISS_RESCANDISK:
+ case CCISS_GETLUNINFO:
+ return hpsa_ioctl(dev, cmd, arg);
+
+ case CCISS_PASSTHRU32:
+ return hpsa_ioctl32_passthru(dev, cmd, arg);
+ case CCISS_BIG_PASSTHRU32:
+ return hpsa_ioctl32_big_passthru(dev, cmd, arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
@@ -2378,8 +2526,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
BYTE sg_used = 0;
int status = 0;
int i;
- __u32 left;
- __u32 sz;
+ u32 left;
+ u32 sz;
BYTE __user *data_ptr;
if (!argp)
@@ -2527,7 +2675,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
case CCISS_DEREGDISK:
case CCISS_REGNEWDISK:
case CCISS_REGNEWD:
- hpsa_update_scsi_devices(h, dev->host->host_no);
+ hpsa_scan_start(h->scsi_host);
return 0;
case CCISS_GETPCIINFO:
return hpsa_getpciinfo_ioctl(h, argp);
@@ -2542,8 +2690,8 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
}
}
-static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
- void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
int pci_dir = XFER_NONE;
@@ -2710,19 +2858,20 @@ static inline unsigned long get_next_completion(struct ctlr_info *h)
return h->access.command_completed(h);
}
-static inline int interrupt_pending(struct ctlr_info *h)
+static inline bool interrupt_pending(struct ctlr_info *h)
{
return h->access.intr_pending(h);
}
static inline long interrupt_not_for_us(struct ctlr_info *h)
{
- return ((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0));
+ return !(h->msi_vector || h->msix_vector) &&
+ ((h->access.intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0));
}
-static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
- __u32 raw_tag)
+static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+ u32 raw_tag)
{
if (unlikely(tag_index >= h->nr_cmds)) {
dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
@@ -2731,7 +2880,7 @@ static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
return 0;
}
-static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
+static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
{
removeQ(c);
if (likely(c->cmd_type == CMD_SCSI))
@@ -2740,42 +2889,79 @@ static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
complete(c->waiting);
}
+static inline u32 hpsa_tag_contains_index(u32 tag)
+{
+#define DIRECT_LOOKUP_BIT 0x10
+ return tag & DIRECT_LOOKUP_BIT;
+}
+
+static inline u32 hpsa_tag_to_index(u32 tag)
+{
+#define DIRECT_LOOKUP_SHIFT 5
+ return tag >> DIRECT_LOOKUP_SHIFT;
+}
+
+static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+{
+#define HPSA_ERROR_BITS 0x03
+ return tag & ~HPSA_ERROR_BITS;
+}
+
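Taken together with hpsa_scsi_queue_command() above, the tag layout is:
bit 4 marks a direct-lookup tag, bits 5 and up carry the command-pool
index, and bits 0-3 stay clear for the controller (error status and
block fetch size). A standalone round-trip sketch (not driver code):

	#include <assert.h>
	#include <stdint.h>

	#define DIRECT_LOOKUP_SHIFT 5
	#define DIRECT_LOOKUP_BIT 0x10

	int main(void)
	{
		uint32_t cmdindex = 42;
		/* encode, as in hpsa_scsi_queue_command() */
		uint32_t tag = (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;

		/* decode, as in the helpers above */
		assert(tag & DIRECT_LOOKUP_BIT);	/* it is an indexed tag */
		assert((tag >> DIRECT_LOOKUP_SHIFT) == cmdindex);
		return 0;
	}
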
+/* process completion of an indexed ("direct lookup") command */
+static inline u32 process_indexed_cmd(struct ctlr_info *h,
+ u32 raw_tag)
+{
+ u32 tag_index;
+ struct CommandList *c;
+
+ tag_index = hpsa_tag_to_index(raw_tag);
+ if (bad_tag(h, tag_index, raw_tag))
+ return next_command(h);
+ c = h->cmd_pool + tag_index;
+ finish_cmd(c, raw_tag);
+ return next_command(h);
+}
+
+/* process completion of a non-indexed command */
+static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
+ u32 raw_tag)
+{
+ u32 tag;
+ struct CommandList *c = NULL;
+ struct hlist_node *tmp;
+
+ tag = hpsa_tag_discard_error_bits(raw_tag);
+ hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
+ finish_cmd(c, raw_tag);
+ return next_command(h);
+ }
+ }
+ bad_tag(h, h->nr_cmds + 1, raw_tag);
+ return next_command(h);
+}
+
static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
{
struct ctlr_info *h = dev_id;
- struct CommandList *c;
unsigned long flags;
- __u32 raw_tag, tag, tag_index;
- struct hlist_node *tmp;
+ u32 raw_tag;
if (interrupt_not_for_us(h))
return IRQ_NONE;
spin_lock_irqsave(&h->lock, flags);
- while (interrupt_pending(h)) {
- while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
- if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
- tag_index = HPSA_TAG_TO_INDEX(raw_tag);
- if (bad_tag(h, tag_index, raw_tag))
- return IRQ_HANDLED;
- c = h->cmd_pool + tag_index;
- finish_cmd(c, raw_tag);
- continue;
- }
- tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
- c = NULL;
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
- if (c->busaddr == tag) {
- finish_cmd(c, raw_tag);
- break;
- }
- }
- }
+ raw_tag = get_next_completion(h);
+ while (raw_tag != FIFO_EMPTY) {
+ if (hpsa_tag_contains_index(raw_tag))
+ raw_tag = process_indexed_cmd(h, raw_tag);
+ else
+ raw_tag = process_nonindexed_cmd(h, raw_tag);
}
spin_unlock_irqrestore(&h->lock, flags);
return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
@@ -2841,7 +3027,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
+ if (hpsa_tag_discard_error_bits(tag) == paddr32)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
@@ -3063,7 +3249,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
*/
static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
- struct pci_dev *pdev, __u32 board_id)
+ struct pci_dev *pdev, u32 board_id)
{
#ifdef CONFIG_PCI_MSI
int err;
@@ -3107,22 +3293,22 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- h->intr[SIMPLE_MODE_INT] = pdev->irq;
- return;
+ h->intr[PERF_MODE_INT] = pdev->irq;
}
static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
{
ushort subsystem_vendor_id, subsystem_device_id, command;
- __u32 board_id, scratchpad = 0;
- __u64 cfg_offset;
- __u32 cfg_base_addr;
- __u64 cfg_base_addr_index;
+ u32 board_id, scratchpad = 0;
+ u64 cfg_offset;
+ u32 cfg_base_addr;
+ u64 cfg_base_addr_index;
+ u32 trans_offset;
int i, prod_index, err;
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
- board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+ board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id);
for (i = 0; i < ARRAY_SIZE(products); i++)
@@ -3199,7 +3385,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
/* get the address index number */
cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
- cfg_base_addr &= (__u32) 0x0000ffff;
+ cfg_base_addr &= (u32) 0x0000ffff;
cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
if (cfg_base_addr_index == -1) {
dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
@@ -3211,11 +3397,14 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
cfg_base_addr_index) + cfg_offset,
sizeof(h->cfgtable));
- h->board_id = board_id;
-
- /* Query controller for max supported commands: */
- h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+ /* Find performant mode table. */
+ trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+ h->transtable = remap_pci_mem(pci_resource_start(pdev,
+ cfg_base_addr_index)+cfg_offset+trans_offset,
+ sizeof(*h->transtable));
+ h->board_id = board_id;
+ h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
/* Allow room for some ioctls */
@@ -3232,7 +3421,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
#ifdef CONFIG_X86
{
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
- __u32 prefetch;
+ u32 prefetch;
prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
prefetch |= 0x100;
writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
@@ -3244,7 +3433,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
* physical memory.
*/
if (board_id == 0x3225103C) {
- __u32 dma_prefetch;
+ u32 dma_prefetch;
dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
dma_prefetch |= 0x8000;
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
@@ -3286,10 +3475,26 @@ err_out_free_res:
return err;
}
+static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
+{
+ int rc;
+
+#define HBA_INQUIRY_BYTE_COUNT 64
+ h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
+ if (!h->hba_inquiry_data)
+ return;
+ rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
+ h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
+ if (rc != 0) {
+ kfree(h->hba_inquiry_data);
+ h->hba_inquiry_data = NULL;
+ }
+}
+
static int __devinit hpsa_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int i;
+ int i, rc;
int dac;
struct ctlr_info *h;
@@ -3314,17 +3519,23 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
}
}
- BUILD_BUG_ON(sizeof(struct CommandList) % 8);
+ /* Command structures must be aligned on a 32-byte boundary because
+ * the 5 lower bits of the address are used by the hardware and by
+ * the driver. See comments in hpsa.h for more info.
+ */
+#define COMMANDLIST_ALIGNMENT 32
+ BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
- return -1;
+ return -ENOMEM;
h->busy_initializing = 1;
INIT_HLIST_HEAD(&h->cmpQ);
INIT_HLIST_HEAD(&h->reqQ);
mutex_init(&h->busy_shutting_down);
init_completion(&h->scan_wait);
- if (hpsa_pci_init(h, pdev) != 0)
+ rc = hpsa_pci_init(h, pdev);
+ if (rc != 0)
goto clean1;
sprintf(h->devname, "hpsa%d", number_of_controllers);
@@ -3333,27 +3544,32 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->pdev = pdev;
/* configure PCI DMA stuff */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (rc == 0) {
dac = 1;
- else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
- dac = 0;
- else {
- dev_err(&pdev->dev, "no suitable DMA available\n");
- goto clean1;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc == 0) {
+ dac = 0;
+ } else {
+ dev_err(&pdev->dev, "no suitable DMA available\n");
+ goto clean1;
+ }
}
/* make sure the board interrupts are off */
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
- IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
+ rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr,
+ IRQF_DISABLED, h->devname, h);
+ if (rc) {
dev_err(&pdev->dev, "unable to get irq %d for %s\n",
- h->intr[SIMPLE_MODE_INT], h->devname);
+ h->intr[PERF_MODE_INT], h->devname);
goto clean2;
}
- dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
- h->devname, pdev->device, pci_name(pdev),
- h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+ dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
+ h->devname, pdev->device,
+ h->intr[PERF_MODE_INT], dac ? "" : " not");
h->cmd_pool_bits =
kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3368,9 +3584,13 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
|| (h->cmd_pool == NULL)
|| (h->errinfo_pool == NULL)) {
dev_err(&pdev->dev, "out of memory");
+ rc = -ENOMEM;
goto clean4;
}
spin_lock_init(&h->lock);
+ spin_lock_init(&h->scan_lock);
+ init_waitqueue_head(&h->scan_wait_queue);
+ h->scan_finished = 1; /* no scan currently in progress */
pci_set_drvdata(pdev, h);
memset(h->cmd_pool_bits, 0,
@@ -3382,6 +3602,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
/* Turn the interrupts on so we can service requests */
h->access.set_intr_mask(h, HPSA_INTR_ON);
+ hpsa_put_ctlr_into_performant_mode(h);
+ hpsa_hba_inquiry(h);
hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
h->busy_initializing = 0;
return 1;
@@ -3397,12 +3619,12 @@ clean4:
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
- free_irq(h->intr[SIMPLE_MODE_INT], h);
+ free_irq(h->intr[PERF_MODE_INT], h);
clean2:
clean1:
h->busy_initializing = 0;
kfree(h);
- return -1;
+ return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
@@ -3441,7 +3663,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- free_irq(h->intr[2], h);
+ free_irq(h->intr[PERF_MODE_INT], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
@@ -3470,7 +3692,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool, h->errinfo_pool_dhandle);
+ pci_free_consistent(h->pdev, h->reply_pool_size,
+ h->reply_pool, h->reply_pool_dhandle);
kfree(h->cmd_pool_bits);
+ kfree(h->blockFetchTable);
+ kfree(h->hba_inquiry_data);
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
@@ -3502,6 +3728,129 @@ static struct pci_driver hpsa_pci_driver = {
.resume = hpsa_resume,
};
+/* Fill in bucket_map[], given nsgs (the max number of
+ * scatter gather elements supported) and bucket[],
+ * which is an array of 8 integers. The bucket[] array
+ * contains 8 different DMA transfer sizes (in 16
+ * byte increments) which the controller uses to fetch
+ * commands. This function fills in bucket_map[], which
+ * maps a given number of scatter gather elements to one of
+ * the 8 DMA transfer sizes. The point of it is to allow the
+ * controller to only do as much DMA as needed to fetch the
+ * command, with the DMA transfer size encoded in the lower
+ * bits of the command address.
+ */
+static void calc_bucket_map(int bucket[], int num_buckets,
+ int nsgs, int *bucket_map)
+{
+ int i, j, b, size;
+
+ /* even a command with 0 SGs requires 4 blocks */
+#define MINIMUM_TRANSFER_BLOCKS 4
+#define NUM_BUCKETS 8
+ /* Note, bucket_map must have nsgs+1 entries. */
+ for (i = 0; i <= nsgs; i++) {
+ /* Compute size of a command with i SG entries */
+ size = i + MINIMUM_TRANSFER_BLOCKS;
+ b = num_buckets - 1; /* Assume the biggest bucket */
+ /* Find the bucket that is just big enough */
+ for (j = 0; j < num_buckets; j++) {
+ if (bucket[j] >= size) {
+ b = j;
+ break;
+ }
+ }
+ /* for a command with i SG entries, use bucket b. */
+ bucket_map[i] = b;
+ }
+}
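
As a worked example (a sketch under the driver's own defaults below, not
extra functionality): with the bft[] table used in
hpsa_put_ctlr_into_performant_mode(), a command with 3 SG entries needs
3 + 4 = 7 blocks, so it maps to bucket 2 (bft[2] = 8 blocks, i.e. 128
bytes of DMA instead of the full command size):

	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35};
	int bucket_map[33];

	bft[7] = 32 + 4;	/* as the driver does: largest = max SGs + 4 */
	calc_bucket_map(bft, 8, 32, bucket_map);
	/* bucket_map[0]  == 0  (0 SGs  ->  4 blocks, fits bft[0] = 5)  */
	/* bucket_map[3]  == 2  (3 SGs  ->  7 blocks, fits bft[2] = 8)  */
	/* bucket_map[32] == 7  (32 SGs -> 36 blocks, fits bft[7] = 36) */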
+
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+{
+ u32 trans_support;
+ u64 trans_offset;
+ /* 5 = 1 s/g entry or 4k
+ * 6 = 2 s/g entry or 8k
+ * 8 = 4 s/g entry or 16k
+ * 10 = 6 s/g entry or 24k
+ */
+ int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */
+ int i = 0;
+ int l = 0;
+ unsigned long register_value;
+
+ trans_support = readl(&(h->cfgtable->TransportSupport));
+ if (!(trans_support & PERFORMANT_MODE))
+ return;
+
+ h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+ h->max_sg_entries = 32;
+ /* Performant mode ring buffer and supporting data structures */
+ h->reply_pool_size = h->max_commands * sizeof(u64);
+ h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
+ &(h->reply_pool_dhandle));
+
+ /* Need a block fetch table for performant mode */
+ h->blockFetchTable = kmalloc(((h->max_sg_entries+1) *
+ sizeof(u32)), GFP_KERNEL);
+
+ if ((h->reply_pool == NULL)
+ || (h->blockFetchTable == NULL))
+ goto clean_up;
+
+ h->reply_pool_wraparound = 1; /* spec: init to 1 */
+
+ /* Controller spec: zero out this buffer. */
+ memset(h->reply_pool, 0, h->reply_pool_size);
+ h->reply_pool_head = h->reply_pool;
+
+ trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+ bft[7] = h->max_sg_entries + 4;
+ calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable);
+ for (i = 0; i < 8; i++)
+ writel(bft[i], &h->transtable->BlockFetch[i]);
+
+ /* size of controller ring buffer */
+ writel(h->max_commands, &h->transtable->RepQSize);
+ writel(1, &h->transtable->RepQCount);
+ writel(0, &h->transtable->RepQCtrAddrLow32);
+ writel(0, &h->transtable->RepQCtrAddrHigh32);
+ writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
+ writel(0, &h->transtable->RepQAddr0High32);
+ writel(CFGTBL_Trans_Performant,
+ &(h->cfgtable->HostWrite.TransportRequest));
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+ /* under certain very rare conditions, this can take a while.
+ * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+ * as we enter this code.) */
+ for (l = 0; l < MAX_CONFIG_WAIT; l++) {
+ register_value = readl(h->vaddr + SA5_DOORBELL);
+ if (!(register_value & CFGTBL_ChangeReq))
+ break;
+ /* delay and try again */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+ register_value = readl(&(h->cfgtable->TransportActive));
+ if (!(register_value & CFGTBL_Trans_Performant)) {
+ dev_warn(&h->pdev->dev, "unable to get board into"
+ " performant mode\n");
+ return;
+ }
+
+ /* Change the access methods to the performant access methods */
+ h->access = SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
+
+ return;
+
+clean_up:
+ if (h->reply_pool)
+ pci_free_consistent(h->pdev, h->reply_pool_size,
+ h->reply_pool, h->reply_pool_dhandle);
+ kfree(h->blockFetchTable);
+}
+
/*
* This is it. Register the PCI driver information for the cards we control;
* the OS will call our registered routines when it finds one of our cards.
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6bd1949144b5..a0502b3ac17e 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -33,7 +33,7 @@ struct access_method {
struct CommandList *c);
void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
unsigned long (*fifo_full)(struct ctlr_info *h);
- unsigned long (*intr_pending)(struct ctlr_info *h);
+ bool (*intr_pending)(struct ctlr_info *h);
unsigned long (*command_completed)(struct ctlr_info *h);
};
@@ -55,19 +55,20 @@ struct ctlr_info {
char *product_name;
char firm_ver[4]; /* Firmware version */
struct pci_dev *pdev;
- __u32 board_id;
+ u32 board_id;
void __iomem *vaddr;
unsigned long paddr;
int nr_cmds; /* Number of commands allowed on this controller */
struct CfgTable __iomem *cfgtable;
+ int max_sg_entries;
int interrupts_enabled;
int major;
int max_commands;
int commands_outstanding;
int max_outstanding; /* Debug */
int usage_count; /* number of opens on all minor devices */
-# define DOORBELL_INT 0
-# define PERF_MODE_INT 1
+# define PERF_MODE_INT 0
+# define DOORBELL_INT 1
# define SIMPLE_MODE_INT 2
# define MEMQ_MODE_INT 3
unsigned int intr[4];
@@ -93,6 +94,9 @@ struct ctlr_info {
int nr_frees;
int busy_initializing;
int busy_scanning;
+ int scan_finished;
+ spinlock_t scan_lock;
+ wait_queue_head_t scan_wait_queue;
struct mutex busy_shutting_down;
struct list_head scan_list;
struct completion scan_wait;
@@ -102,6 +106,24 @@ struct ctlr_info {
int ndevices; /* number of used elements in .dev[] array. */
#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+ /*
+ * Performant mode tables.
+ */
+ u32 trans_support;
+ u32 trans_offset;
+ struct TransTable_struct *transtable;
+ unsigned long transMethod;
+
+ /*
+ * Performant mode completion buffer
+ */
+ u64 *reply_pool;
+ dma_addr_t reply_pool_dhandle;
+ u64 *reply_pool_head;
+ size_t reply_pool_size;
+ unsigned char reply_pool_wraparound;
+ u32 *blockFetchTable;
+ unsigned char *hba_inquiry_data;
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
@@ -164,9 +186,16 @@ struct ctlr_info {
#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
#define HPSA_ERROR_BIT 0x02
-#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
-#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
-#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
+
+/* Performant mode flags */
+#define SA5_PERF_INTR_PENDING 0x04
+#define SA5_PERF_INTR_OFF 0x05
+#define SA5_OUTDB_STATUS_PERF_BIT 0x01
+#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
+#define SA5_OUTDB_CLEAR 0xA0
+#define SA5_OUTDB_STATUS 0x9C
+
#define HPSA_INTR_ON 1
#define HPSA_INTR_OFF 0
@@ -176,10 +205,8 @@ struct ctlr_info {
static void SA5_submit_command(struct ctlr_info *h,
struct CommandList *c)
{
-#ifdef HPSA_DEBUG
- printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
- c->busaddr);
-#endif /* HPSA_DEBUG */
+ dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
+ c->Header.Tag.lower);
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
h->commands_outstanding++;
if (h->commands_outstanding > h->max_outstanding)
@@ -202,6 +229,52 @@ static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
}
}
+
+static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+ if (val) { /* turn on interrupts */
+ h->interrupts_enabled = 1;
+ writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ } else {
+ h->interrupts_enabled = 0;
+ writel(SA5_PERF_INTR_OFF,
+ h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ }
+}
+
+static unsigned long SA5_performant_completed(struct ctlr_info *h)
+{
+ unsigned long register_value = FIFO_EMPTY;
+
+ /* flush the controller write of the reply queue by reading
+ * outbound doorbell status register.
+ */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ /* msi auto clears the interrupt pending bit. */
+ if (!(h->msi_vector || h->msix_vector)) {
+ writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
+ /* Do a read in order to flush the write to the controller
+ * (as per spec.)
+ */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ }
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ register_value = *(h->reply_pool_head);
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ register_value = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+
+ return register_value;
+}
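
The single parity bit in each reply-queue entry is what lets the host
consume the ring without a separate producer index: the controller writes
each completed tag with the current lap's parity in bit 0, and the host
flips the parity it expects each time its head pointer wraps. A toy
standalone model of that scheme (an illustration, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ring[4] = {0};	/* zeroed, as the spec requires */
		unsigned int head = 0, wrap = 1;

		/* controller posts two tags on the first lap (parity 1) */
		ring[0] = 0x20 | 1;
		ring[1] = 0x40 | 1;

		/* host consumes entries whose parity matches */
		while ((ring[head] & 1) == wrap) {
			printf("completed tag 0x%llx\n",
			       (unsigned long long)(ring[head] & ~1ULL));
			if (++head == 4) {	/* wrapped: flip parity */
				head = 0;
				wrap ^= 1;
			}
		}
		return 0;
	}
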
+
/*
* Returns true if fifo is full.
*
@@ -228,10 +301,10 @@ static unsigned long SA5_completed(struct ctlr_info *h)
#ifdef HPSA_DEBUG
if (register_value != FIFO_EMPTY)
- printk(KERN_INFO "hpsa: Read %lx back from board\n",
+ dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
register_value);
else
- printk(KERN_INFO "hpsa: FIFO Empty read\n");
+ dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n");
#endif
return register_value;
@@ -239,18 +312,28 @@ static unsigned long SA5_completed(struct ctlr_info *h)
/*
* Returns true if an interrupt is pending..
*/
-static unsigned long SA5_intr_pending(struct ctlr_info *h)
+static bool SA5_intr_pending(struct ctlr_info *h)
{
unsigned long register_value =
readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef HPSA_DEBUG
- printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
-#endif /* HPSA_DEBUG */
- if (register_value & SA5_INTR_PENDING)
- return 1;
- return 0 ;
+ dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);
+ return register_value & SA5_INTR_PENDING;
}
+static bool SA5_performant_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+ if (!register_value)
+ return false;
+
+ if (h->msi_vector || h->msix_vector)
+ return true;
+
+ /* Read outbound doorbell to flush */
+ register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+ return register_value & SA5_OUTDB_STATUS_PERF_BIT;
+}
static struct access_method SA5_access = {
SA5_submit_command,
@@ -260,14 +343,19 @@ static struct access_method SA5_access = {
SA5_completed,
};
+static struct access_method SA5_performant_access = {
+ SA5_submit_command,
+ SA5_performant_intr_mask,
+ SA5_fifo_full,
+ SA5_performant_intr_pending,
+ SA5_performant_completed,
+};
+
struct board_type {
- __u32 board_id;
+ u32 board_id;
char *product_name;
struct access_method *access;
};
-
-/* end of old hpsa_scsi.h file */
-
#endif /* HPSA_H */
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 12d71387ed9a..3e0abdf76689 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -101,19 +101,20 @@
#define CFGTBL_AccCmds 0x00000001l
#define CFGTBL_Trans_Simple 0x00000002l
+#define CFGTBL_Trans_Performant 0x00000004l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
#define CFGTBL_BusType_Fibre1G 0x00000100l
#define CFGTBL_BusType_Fibre2G 0x00000200l
struct vals32 {
- __u32 lower;
- __u32 upper;
+ u32 lower;
+ u32 upper;
};
union u64bit {
struct vals32 val32;
- __u64 val;
+ u64 val;
};
/* FIXME this is a per controller value (barf!) */
@@ -126,34 +127,34 @@ union u64bit {
#define HPSA_INQUIRY 0x12
struct InquiryData {
- __u8 data_byte[36];
+ u8 data_byte[36];
};
#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
struct ReportLUNdata {
- __u8 LUNListLength[4];
- __u32 reserved;
- __u8 LUN[HPSA_MAX_LUN][8];
+ u8 LUNListLength[4];
+ u32 reserved;
+ u8 LUN[HPSA_MAX_LUN][8];
};
struct ReportExtendedLUNdata {
- __u8 LUNListLength[4];
- __u8 extended_response_flag;
- __u8 reserved[3];
- __u8 LUN[HPSA_MAX_LUN][24];
+ u8 LUNListLength[4];
+ u8 extended_response_flag;
+ u8 reserved[3];
+ u8 LUN[HPSA_MAX_LUN][24];
};
struct SenseSubsystem_info {
- __u8 reserved[36];
- __u8 portname[8];
- __u8 reserved1[1108];
+ u8 reserved[36];
+ u8 portname[8];
+ u8 reserved1[1108];
};
#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
struct ReadCapdata {
- __u8 total_size[4]; /* Total size in blocks */
- __u8 block_size[4]; /* Size of blocks in bytes */
+ u8 total_size[4]; /* Total size in blocks */
+ u8 block_size[4]; /* Size of blocks in bytes */
};
#if 0
@@ -174,112 +175,131 @@ struct ReadCapdata {
/* Command List Structure */
union SCSI3Addr {
struct {
- __u8 Dev;
- __u8 Bus:6;
- __u8 Mode:2; /* b00 */
+ u8 Dev;
+ u8 Bus:6;
+ u8 Mode:2; /* b00 */
} PeripDev;
struct {
- __u8 DevLSB;
- __u8 DevMSB:6;
- __u8 Mode:2; /* b01 */
+ u8 DevLSB;
+ u8 DevMSB:6;
+ u8 Mode:2; /* b01 */
} LogDev;
struct {
- __u8 Dev:5;
- __u8 Bus:3;
- __u8 Targ:6;
- __u8 Mode:2; /* b10 */
+ u8 Dev:5;
+ u8 Bus:3;
+ u8 Targ:6;
+ u8 Mode:2; /* b10 */
} LogUnit;
};
struct PhysDevAddr {
- __u32 TargetId:24;
- __u32 Bus:6;
- __u32 Mode:2;
+ u32 TargetId:24;
+ u32 Bus:6;
+ u32 Mode:2;
/* 2 level target device addr */
union SCSI3Addr Target[2];
};
struct LogDevAddr {
- __u32 VolId:30;
- __u32 Mode:2;
- __u8 reserved[4];
+ u32 VolId:30;
+ u32 Mode:2;
+ u8 reserved[4];
};
union LUNAddr {
- __u8 LunAddrBytes[8];
+ u8 LunAddrBytes[8];
union SCSI3Addr SCSI3Lun[4];
struct PhysDevAddr PhysDev;
struct LogDevAddr LogDev;
};
struct CommandListHeader {
- __u8 ReplyQueue;
- __u8 SGList;
- __u16 SGTotal;
+ u8 ReplyQueue;
+ u8 SGList;
+ u16 SGTotal;
struct vals32 Tag;
union LUNAddr LUN;
};
struct RequestBlock {
- __u8 CDBLen;
+ u8 CDBLen;
struct {
- __u8 Type:3;
- __u8 Attribute:3;
- __u8 Direction:2;
+ u8 Type:3;
+ u8 Attribute:3;
+ u8 Direction:2;
} Type;
- __u16 Timeout;
- __u8 CDB[16];
+ u16 Timeout;
+ u8 CDB[16];
};
struct ErrDescriptor {
struct vals32 Addr;
- __u32 Len;
+ u32 Len;
};
struct SGDescriptor {
struct vals32 Addr;
- __u32 Len;
- __u32 Ext;
+ u32 Len;
+ u32 Ext;
};
union MoreErrInfo {
struct {
- __u8 Reserved[3];
- __u8 Type;
- __u32 ErrorInfo;
+ u8 Reserved[3];
+ u8 Type;
+ u32 ErrorInfo;
} Common_Info;
struct {
- __u8 Reserved[2];
- __u8 offense_size; /* size of offending entry */
- __u8 offense_num; /* byte # of offense 0-base */
- __u32 offense_value;
+ u8 Reserved[2];
+ u8 offense_size; /* size of offending entry */
+ u8 offense_num; /* byte # of offense 0-base */
+ u32 offense_value;
} Invalid_Cmd;
};
struct ErrorInfo {
- __u8 ScsiStatus;
- __u8 SenseLen;
- __u16 CommandStatus;
- __u32 ResidualCnt;
+ u8 ScsiStatus;
+ u8 SenseLen;
+ u16 CommandStatus;
+ u32 ResidualCnt;
union MoreErrInfo MoreErrInfo;
- __u8 SenseInfo[SENSEINFOBYTES];
+ u8 SenseInfo[SENSEINFOBYTES];
};
/* Command types */
#define CMD_IOCTL_PEND 0x01
#define CMD_SCSI 0x03
+/* This structure needs to be divisible by 32 for the new
+ * indexing method and performant mode.
+ */
+#define PAD32 32
+#define PAD64DIFF 0
+#define USEEXTRA ((sizeof(void *) - 4)/4)
+#define PADSIZE (PAD32 + PAD64DIFF * USEEXTRA)
+
+#define DIRECT_LOOKUP_SHIFT 5
+#define DIRECT_LOOKUP_BIT 0x10
+
+#define HPSA_ERROR_BIT 0x02
struct ctlr_info; /* defined in hpsa.h */
-/* The size of this structure needs to be divisible by 8
- * od on all architectures, because the controller uses 2
- * lower bits of the address, and the driver uses 1 lower
- * bit (3 bits total.)
+/* The size of this structure needs to be divisible by 32
+ * on all architectures because low 5 bits of the addresses
+ * are used as follows:
+ *
+ * bit 0: to device, used to indicate "performant mode" command
+ * from device, indicates error status.
+ * bits 1-3: to device, indicates block fetch table entry for
+ * reducing DMA in fetching commands from host memory.
+ * bit 4: used to indicate whether tag is "direct lookup" (index),
+ * or a bus address.
*/
+
struct CommandList {
struct CommandListHeader Header;
struct RequestBlock Request;
struct ErrDescriptor ErrDesc;
struct SGDescriptor SG[MAXSGENTRIES];
/* information associated with the command */
- __u32 busaddr; /* physical addr of this record */
+ u32 busaddr; /* physical addr of this record */
struct ErrorInfo *err_info; /* pointer to the allocated mem */
struct ctlr_info *h;
int cmd_type;
@@ -291,35 +311,63 @@ struct CommandList {
struct completion *waiting;
int retry_count;
void *scsi_cmd;
+
+/* On 64-bit architectures it so happens that we need no padding to get
+ * this to be 32-byte aligned; on 32-bit systems we need 8 bytes of
+ * padding.  This does that.
+ */
+#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
+ u8 pad[COMMANDLIST_PAD];
+
};
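
The COMMANDLIST_PAD arithmetic can be checked by hand, assuming the usual
LP64/ILP32 models: on 64-bit, sizeof(long) == 8, so (8 - 8)/4 * 8 == 0
bytes of padding; on 32-bit, sizeof(long) == 4, so (8 - 4)/4 * 8 == 8
bytes, which is what rounds sizeof(struct CommandList) up to the multiple
of 32 that the BUILD_BUG_ON in hpsa_init_one() enforces.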
/* Configuration Table Structure */
struct HostWrite {
- __u32 TransportRequest;
- __u32 Reserved;
- __u32 CoalIntDelay;
- __u32 CoalIntCount;
+ u32 TransportRequest;
+ u32 Reserved;
+ u32 CoalIntDelay;
+ u32 CoalIntCount;
};
+#define SIMPLE_MODE 0x02
+#define PERFORMANT_MODE 0x04
+#define MEMQ_MODE 0x08
+
struct CfgTable {
- __u8 Signature[4];
- __u32 SpecValence;
- __u32 TransportSupport;
- __u32 TransportActive;
- struct HostWrite HostWrite;
- __u32 CmdsOutMax;
- __u32 BusTypes;
- __u32 Reserved;
- __u8 ServerName[16];
- __u32 HeartBeat;
- __u32 SCSI_Prefetch;
+ u8 Signature[4];
+ u32 SpecValence;
+ u32 TransportSupport;
+ u32 TransportActive;
+ struct HostWrite HostWrite;
+ u32 CmdsOutMax;
+ u32 BusTypes;
+ u32 TransMethodOffset;
+ u8 ServerName[16];
+ u32 HeartBeat;
+ u32 SCSI_Prefetch;
+ u32 MaxScatterGatherElements;
+ u32 MaxLogicalUnits;
+ u32 MaxPhysicalDevices;
+ u32 MaxPhysicalDrivesPerLogicalUnit;
+ u32 MaxPerformantModeCommands;
+};
+
+#define NUM_BLOCKFETCH_ENTRIES 8
+struct TransTable_struct {
+ u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES];
+ u32 RepQSize;
+ u32 RepQCount;
+ u32 RepQCtrAddrLow32;
+ u32 RepQCtrAddrHigh32;
+ u32 RepQAddr0Low32;
+ u32 RepQAddr0High32;
};
struct hpsa_pci_info {
unsigned char bus;
unsigned char dev_fn;
unsigned short domain;
- __u32 board_id;
+ u32 board_id;
};
#pragma pack()
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 9c1e6a5b5af0..9a4b69d4f4eb 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2336,7 +2336,7 @@ static int option_setup(char *str)
char *cur = str;
int i = 1;
- while (cur && isdigit(*cur) && i <= IM_MAX_HOSTS) {
+ while (cur && isdigit(*cur) && i < IM_MAX_HOSTS) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL)
cur++;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e475b7957c2d..e3a18e0ef276 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -40,7 +40,7 @@
* (CRQ), which is just a buffer of 16 byte entries in the receiver's memory.
* Senders cannot access the buffer directly, but send messages by
* making a hypervisor call and passing in the 16 bytes. The hypervisor
- * puts the message in the next 16 byte space in round-robbin fashion,
+ * puts the message in the next 16 byte space in round-robin fashion,
* turns on the high order bit of the message (the valid bit), and
* generates an interrupt to the receiver (if interrupts are turned on.)
* The receiver just turns off the valid bit when they have copied out
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 517da3fd89d3..8a89ba900588 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -584,9 +584,10 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct socket *sock = tcp_sw_conn->sock;
/* userspace may have goofed up and not bound us */
- if (!tcp_sw_conn->sock)
+ if (!sock)
return;
/*
* Make sure our recv side is stopped.
@@ -597,6 +598,11 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
+ if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
+ sock->sk->sk_err = EIO;
+ wake_up_interruptible(sock->sk->sk_sleep);
+ }
+
iscsi_conn_stop(cls_conn, flag);
iscsi_sw_tcp_release_conn(conn);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c28a712fd4db..703eb6a88790 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1919,10 +1919,11 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
- struct iscsi_task *task = NULL;
+ struct iscsi_task *task = NULL, *running_task;
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
+ int i;
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
@@ -1947,8 +1948,15 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
}
task = (struct iscsi_task *)sc->SCp.ptr;
- if (!task)
+ if (!task) {
+ /*
+ * Raced with completion. Just reset timer, and let it
+ * complete normally
+ */
+ rc = BLK_EH_RESET_TIMER;
goto done;
+ }
+
/*
* If we have sent (at least queued to the network layer) a pdu or
* recvd one for the task since the last timeout ask for
@@ -1956,10 +1964,10 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
* we can check if it is the task or connection when we send the
* nop as a ping.
*/
- if (time_after_eq(task->last_xfer, task->last_timeout)) {
+ if (time_after(task->last_xfer, task->last_timeout)) {
ISCSI_DBG_EH(session, "Command making progress. Asking "
"scsi-ml for more time to complete. "
- "Last data recv at %lu. Last timeout was at "
+ "Last data xfer at %lu. Last timeout was at "
"%lu\n.", task->last_xfer, task->last_timeout);
task->have_checked_conn = false;
rc = BLK_EH_RESET_TIMER;
@@ -1977,6 +1985,43 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
goto done;
}
+ for (i = 0; i < conn->session->cmds_max; i++) {
+ running_task = conn->session->cmds[i];
+ if (!running_task->sc || running_task == task ||
+ running_task->state != ISCSI_TASK_RUNNING)
+ continue;
+
+ /*
+ * Only check if cmds started before this one have made
+ * progress, or this could never fail
+ */
+ if (time_after(running_task->sc->jiffies_at_alloc,
+ task->sc->jiffies_at_alloc))
+ continue;
+
+ if (time_after(running_task->last_xfer, task->last_timeout)) {
+ /*
+ * This task has not made progress, but a task
+ * started before us has transferred data since
+ * we started/last-checked. We could be queueing
+ * too many tasks or the LU is bad.
+ *
+ * If the device is bad the cmds ahead of us on
+ * other devs will complete, and this loop will
+ * eventually fail starting the scsi eh.
+ */
+ ISCSI_DBG_EH(session, "Command has not made progress "
+ "but commands ahead of it have. "
+ "Asking scsi-ml for more time to "
+ "complete. Our last xfer vs running task "
+ "last xfer %lu/%lu. Last check %lu.\n",
+ task->last_xfer, running_task->last_xfer,
+ task->last_timeout);
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+ }
+
/* Assumes nop timeout is shorter than scsi cmd timeout */
if (task->have_checked_conn)
goto done;
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index ab19b3b4be52..22775165bf6a 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -1,5 +1,5 @@
/*
- * SCSI RDAM Protocol lib functions
+ * SCSI RDMA Protocol lib functions
*
* Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
*
@@ -328,7 +328,7 @@ int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
int offset, err = 0;
u8 format;
- offset = cmd->add_cdb_len * 4;
+ offset = cmd->add_cdb_len & ~3;
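(Per the SRP specification's encoding, the ADDITIONAL CDB LENGTH field
occupies the upper six bits of this byte, counted in 4-byte words, with
the two low bits reserved; masking off the reserved bits therefore
yields the additional CDB length in bytes directly, where the old "* 4"
over-counted. The same correction appears in vscsis_data_length() below.)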
dir = srp_cmd_direction(cmd);
if (dir == DMA_FROM_DEVICE)
@@ -366,7 +366,7 @@ static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
struct srp_direct_buf *md;
struct srp_indirect_buf *id;
- int len = 0, offset = cmd->add_cdb_len * 4;
+ int len = 0, offset = cmd->add_cdb_len & ~3;
u8 fmt;
if (dir == DMA_TO_DEVICE)
@@ -440,6 +440,6 @@ int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
}
EXPORT_SYMBOL_GPL(srp_cmd_queue);
-MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions");
+MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1cc23a69db5e..84b696463a58 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -315,6 +315,9 @@ struct lpfc_vport {
#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
+#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
+#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
+#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -448,6 +451,8 @@ struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
uint32_t oxid;
+ uint32_t flags;
+#define UNSOL_VALID 0x00000001
};
struct lpfc_hba {
@@ -499,6 +504,10 @@ struct lpfc_hba {
(struct lpfc_hba *);
void (*lpfc_stop_port)
(struct lpfc_hba *);
+ int (*lpfc_hba_init_link)
+ (struct lpfc_hba *);
+ int (*lpfc_hba_down_link)
+ (struct lpfc_hba *);
/* SLI4 specific HBA data structure */
@@ -613,6 +622,7 @@ struct lpfc_hba {
uint32_t cfg_enable_bg;
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
+ uint32_t cfg_suppress_link_up;
lpfc_vpd_t vpd; /* vital product data */
@@ -790,7 +800,7 @@ struct lpfc_hba {
uint16_t vlan_id;
struct list_head fcf_conn_rec_list;
- struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
+ spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
struct unsol_rcv_ct_ctx ct_ctx[64];
uint32_t ctx_idx;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 91542f786edf..c992e8328f9e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -482,6 +482,41 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_link_state_store - Transition the link_state on an HBA port
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: one or more lpfc_polling_flags values.
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL if the buffer is not "up" or "down"
+ * return from link state change function if non-zero
+ * length of the buf on success
+ **/
+static ssize_t
+lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ int status = -EINVAL;
+
+ if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
+ (phba->link_state == LPFC_LINK_DOWN))
+ status = phba->lpfc_hba_init_link(phba);
+ else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
+ (phba->link_state >= LPFC_LINK_UP))
+ status = phba->lpfc_hba_down_link(phba);
+
+ if (status == 0)
+ return strlen(buf);
+ else
+ return status;
+}
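
With this store routine hooked up, the link state becomes writable from
userspace: writing the string "up" or "down" to the host's link_state
sysfs attribute (assumed path /sys/class/scsi_host/hostN/link_state)
triggers the corresponding lpfc_hba_init_link() or lpfc_hba_down_link()
call registered on the phba.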
+
+/**
* lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -1219,7 +1254,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
- int val = 0;\
+ uint val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n",\
phba->cfg_##attr);\
@@ -1247,7 +1282,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
- int val = 0;\
+ uint val = 0;\
val = phba->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n",\
phba->cfg_##attr);\
@@ -1274,7 +1309,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
**/
#define lpfc_param_init(attr, default, minval, maxval) \
static int \
-lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
+lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
phba->cfg_##attr = val;\
@@ -1309,7 +1344,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, int val) \
**/
#define lpfc_param_set(attr, default, minval, maxval) \
static int \
-lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
+lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
phba->cfg_##attr = val;\
@@ -1350,7 +1385,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
struct lpfc_hba *phba = vport->phba;\
- int val=0;\
+ uint val = 0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
if (sscanf(buf, "%i", &val) != 1)\
@@ -1382,7 +1417,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
- int val = 0;\
+ uint val = 0;\
val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
}
@@ -1409,7 +1444,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
- int val = 0;\
+ uint val = 0;\
val = vport->cfg_##attr;\
return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
}
@@ -1434,7 +1469,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
**/
#define lpfc_vport_param_init(attr, default, minval, maxval) \
static int \
-lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
+lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
vport->cfg_##attr = val;\
@@ -1466,7 +1501,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, int val) \
**/
#define lpfc_vport_param_set(attr, default, minval, maxval) \
static int \
-lpfc_##attr##_set(struct lpfc_vport *vport, int val) \
+lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
vport->cfg_##attr = val;\
@@ -1502,7 +1537,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
{ \
struct Scsi_Host *shost = class_to_shost(dev);\
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
- int val=0;\
+ uint val = 0;\
if (!isdigit(buf[0]))\
return -EINVAL;\
if (sscanf(buf, "%i", &val) != 1)\
@@ -1515,22 +1550,22 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1540,16 +1575,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1559,22 +1594,22 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1584,16 +1619,16 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
lpfc_##name##_show, lpfc_##name##_store)
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static int lpfc_##name = defval;\
-module_param(lpfc_##name, int, 0);\
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, 0);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1614,7 +1649,8 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
-static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
+ lpfc_link_state_store);
static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1897,6 +1933,15 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO,
lpfc_enable_npiv_show, NULL);
/*
+# lpfc_suppress_link_up: Bring link up at initialization
+# 0x0 = bring link up (issue MBX_INIT_LINK)
+# 0x1 = do NOT bring link up at initialization (MBX_INIT_LINK)
+# 0x2 = never bring up link
+# Default value is 0.
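+# For example, set at module load time: modprobe lpfc lpfc_suppress_link_up=0x1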
+*/
+LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
+
+/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
*/
@@ -3114,12 +3159,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
/*
# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
# support this feature
-# 0 = MSI disabled (default)
+# 0 = MSI disabled
# 1 = MSI enabled
-# 2 = MSI-X enabled
-# Value range is [0,2]. Default value is 0.
+# 2 = MSI-X enabled (default)
+# Value range is [0,2]. Default value is 2.
*/
-LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
@@ -3278,6 +3323,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_prot_sg_seg_cnt,
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
+ &dev_attr_lpfc_suppress_link_up,
NULL,
};
@@ -4456,7 +4502,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
-
+ lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a5d9048235d9..f3f1bf1a0a71 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -33,6 +34,7 @@
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
+#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
@@ -41,14 +43,183 @@
#include "lpfc_vport.h"
#include "lpfc_version.h"
+struct lpfc_bsg_event {
+ struct list_head node;
+ struct kref kref;
+ wait_queue_head_t wq;
+
+ /* Event type and waiter identifiers */
+ uint32_t type_mask;
+ uint32_t req_id;
+ uint32_t reg_id;
+
+ /* next two flags are here for the auto-delete logic */
+ unsigned long wait_time_stamp;
+ int waiting;
+
+ /* seen and not seen events */
+ struct list_head events_to_get;
+ struct list_head events_to_see;
+
+ /* job waiting for this event to finish */
+ struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_iocb {
+ struct lpfc_iocbq *cmdiocbq;
+ struct lpfc_iocbq *rspiocbq;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_nodelist *ndlp;
+
+ /* job waiting for this iocb to finish */
+ struct fc_bsg_job *set_job;
+};
+
+struct lpfc_bsg_mbox {
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *mb;
+
+ /* job waiting for this mbox command to finish */
+ struct fc_bsg_job *set_job;
+};
+
+#define TYPE_EVT 1
+#define TYPE_IOCB 2
+#define TYPE_MBOX 3
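+/* driver-private job context hung off fc_bsg_job->dd_data; the type
+ * field selects the active context_un member
+ */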
+struct bsg_job_data {
+ uint32_t type;
+ union {
+ struct lpfc_bsg_event *evt;
+ struct lpfc_bsg_iocb iocb;
+ struct lpfc_bsg_mbox mbox;
+ } context_un;
+};
+
+struct event_data {
+ struct list_head node;
+ uint32_t type;
+ uint32_t immed_dat;
+ void *data;
+ uint32_t len;
+};
+
+#define BUF_SZ_4K 4096
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+ ELX_LOOPBACK_XRI_SETUP,
+ ELX_LOOPBACK_DATA,
+};
+
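+/* offsetof-style size of the CT request header, i.e. the bytes that
+ * precede the payload union
+ */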
+#define ELX_LOOPBACK_HEADER_SZ \
+ (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
+
+struct lpfc_dmabufext {
+ struct lpfc_dmabuf dma;
+ uint32_t size;
+ uint32_t flag;
+};
+
+/**
+ * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_send_mgmt_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ unsigned long iflags;
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_bsg_iocb *iocb;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = cmdiocbq->context1;
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+ }
+
+ iocb = &dd_data->context_un.iocb;
+ job = iocb->set_job;
+ job->dd_data = NULL; /* so timeout handler does not reply */
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+ if (cmdiocbq->context2 && rspiocbq)
+ memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+ &rspiocbq->iocb, sizeof(IOCB_t));
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ bmp = iocb->bmp;
+ rspiocbq = iocb->rspiocbq;
+ rsp = &rspiocbq->iocb;
+ ndlp = iocb->ndlp;
+
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & 0xff) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else
+ rc = -EACCES;
+ } else
+ job->reply->reply_payload_rcv_len =
+ rsp->un.genreq64.bdl.bdeSize;
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ lpfc_nlp_put(ndlp);
+ kfree(bmp);
+ kfree(dd_data);
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace */
+ job->job_done(job);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+}
+
/**
- * lpfc_bsg_rport_ct - send a CT command from a bsg request
+ * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
* @job: fc_bsg_job to handle
- */
+ **/
static int
-lpfc_bsg_rport_ct(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
- struct Scsi_Host *shost = job->shost;
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = job->rport->dd_data;
@@ -65,57 +236,60 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
struct scatterlist *sgel = NULL;
int numbde;
dma_addr_t busaddr;
+ struct bsg_job_data *dd_data;
+ uint32_t creg_val;
int rc = 0;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2733 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
if (!lpfc_nlp_get(ndlp)) {
- job->reply->result = -ENODEV;
- return 0;
+ rc = -ENODEV;
+ goto no_ndlp;
+ }
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ goto free_ndlp;
}
if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
rc = -ENODEV;
- goto free_ndlp_exit;
+ goto free_bmp;
}
- spin_lock_irq(shost->host_lock);
cmdiocbq = lpfc_sli_get_iocbq(phba);
if (!cmdiocbq) {
rc = -ENOMEM;
- spin_unlock_irq(shost->host_lock);
- goto free_ndlp_exit;
+ goto free_bmp;
}
- cmd = &cmdiocbq->iocb;
+ cmd = &cmdiocbq->iocb;
rspiocbq = lpfc_sli_get_iocbq(phba);
if (!rspiocbq) {
rc = -ENOMEM;
goto free_cmdiocbq;
}
- spin_unlock_irq(shost->host_lock);
rsp = &rspiocbq->iocb;
-
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- spin_lock_irq(shost->host_lock);
- goto free_rspiocbq;
- }
-
- spin_lock_irq(shost->host_lock);
bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
if (!bmp->virt) {
rc = -ENOMEM;
- goto free_bmp;
+ goto free_rspiocbq;
}
- spin_unlock_irq(shost->host_lock);
INIT_LIST_HEAD(&bmp->list);
bpl = (struct ulp_bde64 *) bmp->virt;
-
request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
@@ -157,78 +331,152 @@ lpfc_bsg_rport_ct(struct fc_bsg_job *job)
cmd->ulpContext = ndlp->nlp_rpi;
cmd->ulpOwner = OWN_CHIP;
cmdiocbq->vport = phba->pport;
- cmdiocbq->context1 = NULL;
- cmdiocbq->context2 = NULL;
+ cmdiocbq->context3 = bmp;
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
-
timeout = phba->fc_ratov * 2;
- job->dd_data = cmdiocbq;
-
- rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
- timeout + LPFC_DRVR_TIMEOUT);
-
- if (rc != IOCB_TIMEDOUT) {
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ cmd->ulpTimeout = timeout;
+
+ cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+ cmdiocbq->context1 = dd_data;
+ cmdiocbq->context2 = rspiocbq;
+ dd_data->type = TYPE_IOCB;
+ dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+ dd_data->context_un.iocb.rspiocbq = rspiocbq;
+ dd_data->context_un.iocb.set_job = job;
+ dd_data->context_un.iocb.bmp = bmp;
+ dd_data->context_un.iocb.ndlp = ndlp;
+
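+	/* re-enable the FCP ring interrupt in case polling has it disabled */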
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ creg_val = readl(phba->HCregaddr);
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
}
- if (rc == IOCB_TIMEDOUT) {
- lpfc_sli_release_iocbq(phba, rspiocbq);
- rc = -EACCES;
- goto free_ndlp_exit;
- }
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (rc != IOCB_SUCCESS) {
- rc = -EACCES;
- goto free_outdmp;
- }
+ if (rc == IOCB_SUCCESS)
+ return 0; /* done for now */
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & 0xff) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- goto free_outdmp;
- }
- } else
- job->reply->reply_payload_rcv_len =
- rsp->un.genreq64.bdl.bdeSize;
+ /* iocb failed so cleanup */
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-free_outdmp:
- spin_lock_irq(shost->host_lock);
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-free_bmp:
- kfree(bmp);
+
free_rspiocbq:
lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
lpfc_sli_release_iocbq(phba, cmdiocbq);
- spin_unlock_irq(shost->host_lock);
-free_ndlp_exit:
+free_bmp:
+ kfree(bmp);
+free_ndlp:
lpfc_nlp_put(ndlp);
+no_ndlp:
+ kfree(dd_data);
+no_dd_data:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_bsg_rport_els function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *pbuflist = NULL;
+ struct fc_bsg_ctels_reply *els_reply;
+ uint8_t *rjt_data;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = cmdiocbq->context1;
+ /* normal completion and timeout crossed paths, already done */
+ if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+ }
+
+ cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+ if (cmdiocbq->context2 && rspiocbq)
+ memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+ &rspiocbq->iocb, sizeof(IOCB_t));
+
+ job = dd_data->context_un.iocb.set_job;
+ cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
+ rspiocbq = dd_data->context_un.iocb.rspiocbq;
+ rsp = &rspiocbq->iocb;
+ ndlp = dd_data->context_un.iocb.ndlp;
+
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (job->reply->result == -EAGAIN)
+ rc = -EAGAIN;
+ else if (rsp->ulpStatus == IOSTAT_SUCCESS)
+ job->reply->reply_payload_rcv_len =
+ rsp->un.elsreq64.bdl.bdeSize;
+ else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ job->reply->reply_payload_rcv_len =
+ sizeof(struct fc_bsg_ctels_reply);
+ /* LS_RJT data returned in word 4 */
+ rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+ els_reply = &job->reply->reply_data.ctels_reply;
+ els_reply->status = FC_CTELS_STATUS_REJECT;
+ els_reply->rjt_data.action = rjt_data[3];
+ els_reply->rjt_data.reason_code = rjt_data[2];
+ els_reply->rjt_data.reason_explanation = rjt_data[1];
+ els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else
+ rc = -EIO;
+
+ pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
+ lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ lpfc_nlp_put(ndlp);
+ kfree(dd_data);
/* make error code available to userspace */
job->reply->result = rc;
+ job->dd_data = NULL;
/* complete the job back to userspace */
job->job_done(job);
-
- return 0;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
}
/**
* lpfc_bsg_rport_els - send an ELS command from a bsg request
* @job: fc_bsg_job to handle
- */
+ **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
@@ -236,7 +484,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = job->rport->dd_data;
struct lpfc_nodelist *ndlp = rdata->pnode;
-
uint32_t elscmd;
uint32_t cmdsize;
uint32_t rspsize;
@@ -248,20 +495,30 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
struct lpfc_dmabuf *prsp;
struct lpfc_dmabuf *pbuflist = NULL;
struct ulp_bde64 *bpl;
- int iocb_status;
int request_nseg;
int reply_nseg;
struct scatterlist *sgel = NULL;
int numbde;
dma_addr_t busaddr;
+ struct bsg_job_data *dd_data;
+ uint32_t creg_val;
int rc = 0;
/* in case no data is transferred */
job->reply->reply_payload_rcv_len = 0;
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2735 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
if (!lpfc_nlp_get(ndlp)) {
rc = -ENODEV;
- goto out;
+ goto free_dd_data;
}
elscmd = job->request->rqst_data.r_els.els_code;
@@ -271,24 +528,24 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
if (!rspiocbq) {
lpfc_nlp_put(ndlp);
rc = -ENOMEM;
- goto out;
+ goto free_dd_data;
}
rsp = &rspiocbq->iocb;
rpi = ndlp->nlp_rpi;
- cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
+ cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
-
if (!cmdiocbq) {
- lpfc_sli_release_iocbq(phba, rspiocbq);
- return -EIO;
+ rc = -EIO;
+ goto free_rspiocbq;
}
- job->dd_data = cmdiocbq;
+	/* lpfc_prep_els_iocb sets context1 to the ndlp, context2 to the
+	 * command dmabuf; context3 holds the data dmabuf
+ */
pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
kfree(pcmd);
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
@@ -300,7 +557,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
-
for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
busaddr = sg_dma_address(sgel);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
@@ -322,7 +578,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
bpl++;
}
-
cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
cmdiocbq->iocb.ulpContext = rpi;
@@ -330,102 +585,62 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
cmdiocbq->context1 = NULL;
cmdiocbq->context2 = NULL;
- iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
- rspiocbq, (phba->fc_ratov * 2)
- + LPFC_DRVR_TIMEOUT);
-
- /* release the new ndlp once the iocb completes */
- lpfc_nlp_put(ndlp);
- if (iocb_status != IOCB_TIMEDOUT) {
- pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
- job->request_payload.sg_cnt, DMA_TO_DEVICE);
- pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
- job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+ cmdiocbq->context1 = dd_data;
+ cmdiocbq->context2 = rspiocbq;
+ dd_data->type = TYPE_IOCB;
+ dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
+ dd_data->context_un.iocb.rspiocbq = rspiocbq;
+ dd_data->context_un.iocb.set_job = job;
+ dd_data->context_un.iocb.bmp = NULL;;
+ dd_data->context_un.iocb.ndlp = ndlp;
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ creg_val = readl(phba->HCregaddr);
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
}
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+ lpfc_nlp_put(ndlp);
+ if (rc == IOCB_SUCCESS)
+ return 0; /* done for now */
- if (iocb_status == IOCB_SUCCESS) {
- if (rsp->ulpStatus == IOSTAT_SUCCESS) {
- job->reply->reply_payload_rcv_len =
- rsp->un.elsreq64.bdl.bdeSize;
- rc = 0;
- } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
- struct fc_bsg_ctels_reply *els_reply;
- /* LS_RJT data returned in word 4 */
- uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
-
- els_reply = &job->reply->reply_data.ctels_reply;
- job->reply->result = 0;
- els_reply->status = FC_CTELS_STATUS_REJECT;
- els_reply->rjt_data.action = rjt_data[0];
- els_reply->rjt_data.reason_code = rjt_data[1];
- els_reply->rjt_data.reason_explanation = rjt_data[2];
- els_reply->rjt_data.vendor_unique = rjt_data[3];
- } else
- rc = -EIO;
- } else
- rc = -EIO;
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+ lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
- if (iocb_status != IOCB_TIMEDOUT)
- lpfc_els_free_iocb(phba, cmdiocbq);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_rspiocbq:
lpfc_sli_release_iocbq(phba, rspiocbq);
-out:
+free_dd_data:
+ kfree(dd_data);
+
+no_dd_data:
/* make error code available to userspace */
job->reply->result = rc;
- /* complete the job back to userspace */
- job->job_done(job);
-
- return 0;
-}
-
-struct lpfc_ct_event {
- struct list_head node;
- int ref;
- wait_queue_head_t wq;
-
- /* Event type and waiter identifiers */
- uint32_t type_mask;
- uint32_t req_id;
- uint32_t reg_id;
-
- /* next two flags are here for the auto-delete logic */
- unsigned long wait_time_stamp;
- int waiting;
-
- /* seen and not seen events */
- struct list_head events_to_get;
- struct list_head events_to_see;
-};
-
-struct event_data {
- struct list_head node;
- uint32_t type;
- uint32_t immed_dat;
- void *data;
- uint32_t len;
-};
-
-static struct lpfc_ct_event *
-lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
-{
- struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
- if (!evt)
- return NULL;
-
- INIT_LIST_HEAD(&evt->events_to_get);
- INIT_LIST_HEAD(&evt->events_to_see);
- evt->req_id = ev_req_id;
- evt->reg_id = ev_reg_id;
- evt->wait_time_stamp = jiffies;
- init_waitqueue_head(&evt->wq);
-
- return evt;
+ job->dd_data = NULL;
+ return rc;
}
+/**
+ * lpfc_bsg_event_free - frees an allocated event structure
+ * @kref: Pointer to a kref.
+ *
+ * Called from kref_put. Back cast the kref into an event structure address.
+ * Free any events to get, delete associated nodes, free any events to see,
+ * free any data then free the event itself.
+ **/
static void
-lpfc_ct_event_free(struct lpfc_ct_event *evt)
+lpfc_bsg_event_free(struct kref *kref)
{
+ struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
+ kref);
struct event_data *ed;
list_del(&evt->node);
@@ -447,25 +662,82 @@ lpfc_ct_event_free(struct lpfc_ct_event *evt)
kfree(evt);
}
+/**
+ * lpfc_bsg_event_ref - increments the kref for an event
+ * @evt: Pointer to an event structure.
+ **/
static inline void
-lpfc_ct_event_ref(struct lpfc_ct_event *evt)
+lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
- evt->ref++;
+ kref_get(&evt->kref);
}
+/**
+ * lpfc_bsg_event_unref - Uses kref_put to free an event structure
+ * @evt: Pointer to an event structure.
+ **/
static inline void
-lpfc_ct_event_unref(struct lpfc_ct_event *evt)
+lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
- if (--evt->ref < 0)
- lpfc_ct_event_free(evt);
+ kref_put(&evt->kref, lpfc_bsg_event_free);
}
-#define SLI_CT_ELX_LOOPBACK 0x10
+/**
+ * lpfc_bsg_event_new - allocate and initialize a event structure
+ * @ev_mask: Mask of events.
+ * @ev_reg_id: Event reg id.
+ * @ev_req_id: Event request id.
+ **/
+static struct lpfc_bsg_event *
+lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
+{
+ struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
-enum ELX_LOOPBACK_CMD {
- ELX_LOOPBACK_XRI_SETUP,
- ELX_LOOPBACK_DATA,
-};
+ if (!evt)
+ return NULL;
+
+ INIT_LIST_HEAD(&evt->events_to_get);
+ INIT_LIST_HEAD(&evt->events_to_see);
+ evt->type_mask = ev_mask;
+ evt->req_id = ev_req_id;
+ evt->reg_id = ev_reg_id;
+ evt->wait_time_stamp = jiffies;
+ init_waitqueue_head(&evt->wq);
+ kref_init(&evt->kref);
+ return evt;
+}
+
+/**
+ * diag_cmd_data_free - Frees an lpfc dma buffer extension
+ * @phba: Pointer to HBA context object.
+ * @mlist: Pointer to an lpfc dma buffer extension.
+ **/
+static int
+diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
+{
+ struct lpfc_dmabufext *mlast;
+ struct pci_dev *pcidev;
+ struct list_head head, *curr, *next;
+
+ if ((!mlist) || (!lpfc_is_link_up(phba) &&
+ (phba->link_flag & LS_LOOPBACK_MODE))) {
+ return 0;
+ }
+
+ pcidev = phba->pcidev;
+ list_add_tail(&head, &mlist->dma.list);
+
+ list_for_each_safe(curr, next, &head) {
+ mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
+ if (mlast->dma.virt)
+ dma_free_coherent(&pcidev->dev,
+ mlast->size,
+ mlast->dma.virt,
+ mlast->dma.phys);
+ kfree(mlast);
+ }
+ return 0;
+}
/**
* lpfc_bsg_ct_unsol_event - process an unsolicited CT command
@@ -474,9 +746,9 @@ enum ELX_LOOPBACK_CMD {
* @piocbq:
*
* This function is called when an unsolicited CT command is received. It
- * forwards the event to any processes registerd to receive CT events.
- */
-void
+ * forwards the event to any processes registered to receive CT events.
+ **/
+int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocbq)
{
@@ -484,7 +756,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t cmd;
uint32_t len;
struct lpfc_dmabuf *dmabuf = NULL;
- struct lpfc_ct_event *evt;
+ struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
size_t offset = 0;
@@ -496,6 +768,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
struct lpfc_hbq_entry *hbqe;
struct lpfc_sli_ct_request *ct_req;
+ struct fc_bsg_job *job = NULL;
+ unsigned long flags;
+ int size = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
@@ -504,6 +779,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
goto error_ct_unsol_exit;
+ if (phba->link_state == LPFC_HBA_ERROR ||
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
+ goto error_ct_unsol_exit;
+
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
dmabuf = bdeBuf1;
else {
@@ -511,7 +790,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
piocbq->iocb.un.cont64[0].addrLow);
dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
}
-
+ if (dmabuf == NULL)
+ goto error_ct_unsol_exit;
ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
evt_req_id = ct_req->FsType;
cmd = ct_req->CommandResponse.bits.CmdRsp;
@@ -519,24 +799,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
- mutex_lock(&phba->ct_event_mutex);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
- if (evt->req_id != evt_req_id)
+ if (!(evt->type_mask & FC_REG_CT_EVENT) ||
+ evt->req_id != evt_req_id)
continue;
- lpfc_ct_event_ref(evt);
-
+ lpfc_bsg_event_ref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
- if (!evt_dat) {
- lpfc_ct_event_unref(evt);
+ if (evt_dat == NULL) {
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt);
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2614 Memory allocation failed for "
"CT event\n");
break;
}
- mutex_unlock(&phba->ct_event_mutex);
-
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
@@ -550,25 +830,25 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
- if (!evt_dat->data) {
+ if (evt_dat->data == NULL) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2615 Memory allocation failed for "
"CT event data, size %d\n",
evt_dat->len);
kfree(evt_dat);
- mutex_lock(&phba->ct_event_mutex);
- lpfc_ct_event_unref(evt);
- mutex_unlock(&phba->ct_event_mutex);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
goto error_ct_unsol_exit;
}
list_for_each_entry(iocbq, &head, list) {
+ size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
bdeBuf1 = iocbq->context2;
bdeBuf2 = iocbq->context3;
}
for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
- int size = 0;
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
@@ -601,9 +881,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iocbq);
kfree(evt_dat->data);
kfree(evt_dat);
- mutex_lock(&phba->ct_event_mutex);
- lpfc_ct_event_unref(evt);
- mutex_unlock(&phba->ct_event_mutex);
+ spin_lock_irqsave(&phba->ct_ev_lock,
+ flags);
+ lpfc_bsg_event_unref(evt);
+ spin_unlock_irqrestore(
+ &phba->ct_ev_lock, flags);
goto error_ct_unsol_exit;
}
memcpy((char *)(evt_dat->data) + offset,
@@ -616,15 +898,24 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dmabuf);
} else {
switch (cmd) {
+ case ELX_LOOPBACK_DATA:
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext *)
+ dmabuf);
+ break;
case ELX_LOOPBACK_XRI_SETUP:
- if (!(phba->sli3_options &
- LPFC_SLI3_HBQ_ENABLED))
+ if ((phba->sli_rev ==
+ LPFC_SLI_REV2) ||
+ (phba->sli3_options &
+ LPFC_SLI3_HBQ_ENABLED
+ )) {
+ lpfc_in_buf_free(phba,
+ dmabuf);
+ } else {
lpfc_post_buffer(phba,
pring,
1);
- else
- lpfc_in_buf_free(phba,
- dmabuf);
+ }
break;
default:
if (!(phba->sli3_options &
@@ -638,7 +929,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
- mutex_lock(&phba->ct_event_mutex);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
if (phba->sli_rev == LPFC_SLI_REV4) {
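+			/* immed_dat indexes the 64-entry unsolicited context array */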
evt_dat->immed_dat = phba->ctx_idx;
phba->ctx_idx = (phba->ctx_idx + 1) % 64;
@@ -651,122 +942,144 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
- wake_up_interruptible(&evt->wq);
- lpfc_ct_event_unref(evt);
- if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+ if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
+ wake_up_interruptible(&evt->wq);
+ lpfc_bsg_event_unref(evt);
break;
+ }
+
+ list_move(evt->events_to_see.prev, &evt->events_to_get);
+ lpfc_bsg_event_unref(evt);
+
+ job = evt->set_job;
+ evt->set_job = NULL;
+ if (job) {
+ job->reply->reply_payload_rcv_len = size;
+ /* make error code available to userspace */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ /* complete the job back to userspace */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job->job_done(job);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ }
}
- mutex_unlock(&phba->ct_event_mutex);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
error_ct_unsol_exit:
if (!list_empty(&head))
list_del(&head);
-
- return;
+ if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+ return 0;
+ return 1;
}
/**
- * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
+ * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
* @job: SET_EVENT fc_bsg_job
- */
+ **/
static int
-lpfc_bsg_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct set_ct_event *event_req;
- struct lpfc_ct_event *evt;
+ struct lpfc_bsg_event *evt;
int rc = 0;
+ struct bsg_job_data *dd_data = NULL;
+ uint32_t ev_mask;
+ unsigned long flags;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2612 Received SET_CT_EVENT below minimum "
"size\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (dd_data == NULL) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2734 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto job_error;
}
event_req = (struct set_ct_event *)
job->request->rqst_data.h_vendor.vendor_cmd;
-
- mutex_lock(&phba->ct_event_mutex);
+ ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
+ FC_REG_EVENT_MASK);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
- lpfc_ct_event_ref(evt);
+ lpfc_bsg_event_ref(evt);
evt->wait_time_stamp = jiffies;
break;
}
}
- mutex_unlock(&phba->ct_event_mutex);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
if (&evt->node == &phba->ct_ev_waiters) {
/* no event waiting struct yet - first call */
- evt = lpfc_ct_event_new(event_req->ev_reg_id,
+ evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
event_req->ev_req_id);
if (!evt) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2617 Failed allocation of event "
"waiter\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto job_error;
}
- mutex_lock(&phba->ct_event_mutex);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_add(&evt->node, &phba->ct_ev_waiters);
- lpfc_ct_event_ref(evt);
- mutex_unlock(&phba->ct_event_mutex);
+ lpfc_bsg_event_ref(evt);
+ evt->wait_time_stamp = jiffies;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
evt->waiting = 1;
- if (wait_event_interruptible(evt->wq,
- !list_empty(&evt->events_to_see))) {
- mutex_lock(&phba->ct_event_mutex);
- lpfc_ct_event_unref(evt); /* release ref */
- lpfc_ct_event_unref(evt); /* delete */
- mutex_unlock(&phba->ct_event_mutex);
- rc = -EINTR;
- goto set_event_out;
- }
-
- evt->wait_time_stamp = jiffies;
- evt->waiting = 0;
-
- mutex_lock(&phba->ct_event_mutex);
- list_move(evt->events_to_see.prev, &evt->events_to_get);
- lpfc_ct_event_unref(evt); /* release ref */
- mutex_unlock(&phba->ct_event_mutex);
-
-set_event_out:
- /* set_event carries no reply payload */
- job->reply->reply_payload_rcv_len = 0;
- /* make error code available to userspace */
- job->reply->result = rc;
- /* complete the job back to userspace */
- job->job_done(job);
-
- return 0;
+ dd_data->type = TYPE_EVT;
+ dd_data->context_un.evt = evt;
+ evt->set_job = job; /* for unsolicited command */
+ job->dd_data = dd_data; /* for fc transport timeout callback*/
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return 0; /* call job done later */
+
+job_error:
+	kfree(dd_data);
+
+ job->dd_data = NULL;
+ return rc;
}
/**
- * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
+ * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
* @job: GET_EVENT fc_bsg_job
- */
+ **/
static int
-lpfc_bsg_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct get_ct_event *event_req;
struct get_ct_event_reply *event_reply;
- struct lpfc_ct_event *evt;
+ struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
- int rc = 0;
+ unsigned long flags;
+	int rc = 0;
if (job->request_len <
sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"2613 Received GET_CT_EVENT request below "
"minimum size\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto job_error;
}
event_req = (struct get_ct_event *)
@@ -774,13 +1087,12 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
event_reply = (struct get_ct_event_reply *)
job->reply->reply_data.vendor_reply.vendor_rsp;
-
- mutex_lock(&phba->ct_event_mutex);
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
if (evt->reg_id == event_req->ev_reg_id) {
if (list_empty(&evt->events_to_get))
break;
- lpfc_ct_event_ref(evt);
+ lpfc_bsg_event_ref(evt);
evt->wait_time_stamp = jiffies;
evt_dat = list_entry(evt->events_to_get.prev,
struct event_data, node);
@@ -788,45 +1100,1539 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
break;
}
}
- mutex_unlock(&phba->ct_event_mutex);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- if (!evt_dat) {
+ /* The app may continue to ask for event data until it gets
+	 * an error indicating that there are no more
+ */
+ if (evt_dat == NULL) {
job->reply->reply_payload_rcv_len = 0;
rc = -ENOENT;
- goto error_get_event_exit;
+ goto job_error;
}
- if (evt_dat->len > job->reply_payload.payload_len) {
- evt_dat->len = job->reply_payload.payload_len;
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2618 Truncated event data at %d "
- "bytes\n",
- job->reply_payload.payload_len);
+ if (evt_dat->len > job->request_payload.payload_len) {
+ evt_dat->len = job->request_payload.payload_len;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2618 Truncated event data at %d "
+ "bytes\n",
+ job->request_payload.payload_len);
}
+ event_reply->type = evt_dat->type;
event_reply->immed_data = evt_dat->immed_dat;
-
if (evt_dat->len > 0)
job->reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->reply_payload.sg_list,
- job->reply_payload.sg_cnt,
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
evt_dat->data, evt_dat->len);
else
job->reply->reply_payload_rcv_len = 0;
- rc = 0;
- if (evt_dat)
+ if (evt_dat) {
kfree(evt_dat->data);
- kfree(evt_dat);
- mutex_lock(&phba->ct_event_mutex);
- lpfc_ct_event_unref(evt);
- mutex_unlock(&phba->ct_event_mutex);
+ kfree(evt_dat);
+ }
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job->dd_data = NULL;
+ job->reply->result = 0;
+ job->job_done(job);
+ return 0;
+
+job_error:
+ job->dd_data = NULL;
+ job->reply->result = rc;
+ return rc;
+}
+
+/**
+ * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * lpfc_issue_ct_rsp function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_iocbq *rspiocbq)
+{
+ struct bsg_job_data *dd_data;
+ struct fc_bsg_job *job;
+ IOCB_t *rsp;
+ struct lpfc_dmabuf *bmp;
+ struct lpfc_nodelist *ndlp;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = cmdiocbq->context1;
+ /* normal completion and timeout crossed paths, already done */
+ if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+ }
-error_get_event_exit:
+ job = dd_data->context_un.iocb.set_job;
+ bmp = dd_data->context_un.iocb.bmp;
+ rsp = &rspiocbq->iocb;
+ ndlp = dd_data->context_un.iocb.ndlp;
+
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+ if (rsp->ulpStatus) {
+ if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+ switch (rsp->un.ulpWord[4] & 0xff) {
+ case IOERR_SEQUENCE_TIMEOUT:
+ rc = -ETIMEDOUT;
+ break;
+ case IOERR_INVALID_RPI:
+ rc = -EFAULT;
+ break;
+ default:
+ rc = -EACCES;
+ break;
+ }
+ } else
+ rc = -EACCES;
+ } else
+ job->reply->reply_payload_rcv_len =
+ rsp->un.genreq64.bdl.bdeSize;
+
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ lpfc_nlp_put(ndlp);
+ kfree(bmp);
+ kfree(dd_data);
/* make error code available to userspace */
job->reply->result = rc;
+ job->dd_data = NULL;
/* complete the job back to userspace */
job->job_done(job);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+}
+
+/**
+ * lpfc_issue_ct_rsp - issue a ct response
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the job object.
+ * @tag: tag index value into the ports context exchange array.
+ * @bmp: Pointer to a dma buffer descriptor.
+ * @num_entry: Number of entries in the bde.
+ **/
+static int
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+ struct lpfc_dmabuf *bmp, int num_entry)
+{
+ IOCB_t *icmd;
+ struct lpfc_iocbq *ctiocb = NULL;
+ int rc = 0;
+ struct lpfc_nodelist *ndlp = NULL;
+ struct bsg_job_data *dd_data;
+ uint32_t creg_val;
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2736 Failed allocation of dd_data\n");
+ rc = -ENOMEM;
+ goto no_dd_data;
+ }
+
+ /* Allocate buffer for command iocb */
+ ctiocb = lpfc_sli_get_iocbq(phba);
+ if (!ctiocb) {
+		rc = -ENOMEM;
+ goto no_ctiocb;
+ }
+
+ icmd = &ctiocb->iocb;
+ icmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ /* Fill in rest of iocb */
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* Do not issue unsol response if oxid not marked as valid */
+ if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+ rc = IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
+ icmd->ulpContext = phba->ct_ctx[tag].oxid;
+ ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
+ if (!ndlp) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+ "2721 ndlp null for oxid %x SID %x\n",
+ icmd->ulpContext,
+ phba->ct_ctx[tag].SID);
+ rc = IOCB_ERROR;
+ goto issue_ct_rsp_exit;
+ }
+ icmd->un.ulpWord[3] = ndlp->nlp_rpi;
+ /* The exchange is done, mark the entry as invalid */
+ phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+ } else
+ icmd->ulpContext = (ushort) tag;
+
+ icmd->ulpTimeout = phba->fc_ratov * 2;
+
+ /* Xmit CT response on exchange <xid> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
+ icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
+
+ ctiocb->iocb_cmpl = NULL;
+ ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+ ctiocb->vport = phba->pport;
+ ctiocb->context3 = bmp;
+
+ ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+ ctiocb->context1 = dd_data;
+ ctiocb->context2 = NULL;
+ dd_data->type = TYPE_IOCB;
+ dd_data->context_un.iocb.cmdiocbq = ctiocb;
+ dd_data->context_un.iocb.rspiocbq = NULL;
+ dd_data->context_un.iocb.set_job = job;
+ dd_data->context_un.iocb.bmp = bmp;
+ dd_data->context_un.iocb.ndlp = ndlp;
+
+ if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+ creg_val = readl(phba->HCregaddr);
+ creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
+ writel(creg_val, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ }
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
+
+ if (rc == IOCB_SUCCESS)
+ return 0; /* done for now */
+
+issue_ct_rsp_exit:
+ lpfc_sli_release_iocbq(phba, ctiocb);
+no_ctiocb:
+ kfree(dd_data);
+no_dd_data:
+ return rc;
+}
+
+/**
+ * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
+ * @job: SEND_MGMT_RESP fc_bsg_job
+ **/
+static int
+lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ struct ulp_bde64 *bpl;
+ struct lpfc_dmabuf *bmp = NULL;
+ struct scatterlist *sgel = NULL;
+ int request_nseg;
+ int numbde;
+ dma_addr_t busaddr;
+ uint32_t tag = mgmt_resp->tag;
+ unsigned long reqbfrcnt =
+ (unsigned long)job->request_payload.payload_len;
+ int rc = 0;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+
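+	/* reject empty payloads and anything larger than 80 x 4K buffers */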
+ if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
+ rc = -ERANGE;
+ goto send_mgmt_rsp_exit;
+ }
+
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (!bmp) {
+ rc = -ENOMEM;
+ goto send_mgmt_rsp_exit;
+ }
+
+ bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+ if (!bmp->virt) {
+ rc = -ENOMEM;
+ goto send_mgmt_rsp_free_bmp;
+ }
+
+ INIT_LIST_HEAD(&bmp->list);
+ bpl = (struct ulp_bde64 *) bmp->virt;
+ request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+ busaddr = sg_dma_address(sgel);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+ bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+ bpl++;
+ }
+
+ rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
+
+ if (rc == IOCB_SUCCESS)
+ return 0; /* done for now */
+
+ /* TBD need to handle a timeout */
+ pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+ job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ rc = -EACCES;
+ lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+send_mgmt_rsp_free_bmp:
+ kfree(bmp);
+send_mgmt_rsp_exit:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ return rc;
+}
+
+/**
+ * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
+ * @job: LPFC_BSG_VENDOR_DIAG_MODE
+ *
+ * This function is responsible for placing a port into diagnostic loopback
+ * mode in order to perform a diagnostic loopback test.
+ * All new scsi requests are blocked, a small delay is used to allow the
+ * scsi requests to complete then the link is brought down. If the link is
+ * is placed in loopback mode then scsi requests are again allowed
+ * so the scsi mid-layer doesn't give up on the port.
+ * All of this is done in-line.
+ **/
+static int
+lpfc_bsg_diag_mode(struct fc_bsg_job *job)
+{
+ struct Scsi_Host *shost = job->shost;
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct diag_mode_set *loopback_mode;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
+ uint32_t link_flags;
+ uint32_t timeout;
+ struct lpfc_vport **vports;
+ LPFC_MBOXQ_t *pmboxq;
+ int mbxstatus;
+ int i = 0;
+ int rc = 0;
+
+ /* no data to return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2738 Received DIAG MODE request below minimum "
+ "size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ loopback_mode = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ link_flags = loopback_mode->type;
+ timeout = loopback_mode->timeout;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = -EACCES;
+ goto job_error;
+ }
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_block_requests(shost);
+ }
+
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_block_requests(shost);
+ }
+
+ while (pring->txcmplq_cnt) {
+ if (i++ > 500) /* wait up to 5 seconds */
+ break;
+
+ msleep(10);
+ }
+
+ memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+ pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ goto loopback_mode_exit;
+ }
+
+ msleep(10);
+ }
+
+ memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
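+		/* pick internal (local) loopback or loop topology for the re-init */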
+ if (link_flags == INTERNAL_LOOP_BACK)
+ pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
+ else
+ pmboxq->u.mb.un.varInitLnk.link_flags =
+ FLAGS_TOPOLOGY_MODE_LOOP;
+
+ pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
+ pmboxq->u.mb.mbxOwner = OWN_HOST;
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+ LPFC_MBOX_TMO);
+
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
+ rc = -ENODEV;
+ else {
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ /* wait for the link attention interrupt */
+ msleep(100);
+
+ i = 0;
+ while (phba->link_state != LPFC_HBA_READY) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ msleep(10);
+ }
+ }
+
+ } else
+ rc = -ENODEV;
+
+loopback_mode_exit:
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ scsi_unblock_requests(shost);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ } else {
+ shost = lpfc_shost_from_vport(phba->pport);
+ scsi_unblock_requests(shost);
+ }
+
+ /*
+ * Let SLI layer release mboxq if mbox command completed after timeout.
+ */
+ if (mbxstatus != MBX_TIMEOUT)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+
+job_error:
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfcdiag_loop_self_reg - obtains a remote port login id
+ * @phba: Pointer to HBA context object
+ * @rpi: Pointer to a remote port login id
+ *
+ * This function obtains a remote port login id so the diag loopback test
+ * can send and receive its own unsolicited CT command.
+ **/
+static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
+{
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *dmabuff;
+ int status;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return ENOMEM;
+
+ status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
+ if (status) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return ENOMEM;
+ }
+
+ dmabuff = (struct lpfc_dmabuf *) mbox->context1;
+ mbox->context1 = NULL;
+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+ if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+ kfree(dmabuff);
+ if (status != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return ENODEV;
+ }
+
+ *rpi = mbox->u.mb.un.varWords[0];
+
+ lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
+ kfree(dmabuff);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 0;
+}
+
+/**
+ * lpfcdiag_loop_self_unreg - unregisters an rpi
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ *
+ * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
+ **/
+static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
+{
+ LPFC_MBOXQ_t *mbox;
+ int status;
+
+ /* Allocate mboxq structure */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox == NULL)
+ return ENOMEM;
+
+ lpfc_unreg_login(phba, 0, rpi, mbox);
+ status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+
+ if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
+ if (status != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return EIO;
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return 0;
+}
+
+/**
+ * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
+ * @phba: Pointer to HBA context object
+ * @rpi: Remote port login id
+ * @txxri: Pointer to transmit exchange id
+ * @rxxri: Pointer to response exchange id
+ *
+ * This function obtains the transmit and receive ids required to send
+ * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
+ * flags are used so the unsolicited response handler is able to process
+ * the ct command sent on the same port.
+ **/
+static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
+			uint16_t *txxri, uint16_t *rxxri)
+{
+ struct lpfc_bsg_event *evt;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+ IOCB_t *cmd, *rsp;
+ struct lpfc_dmabuf *dmabuf;
+ struct ulp_bde64 *bpl = NULL;
+ struct lpfc_sli_ct_request *ctreq = NULL;
+ int ret_val = 0;
+ unsigned long flags;
+
+ *txxri = 0;
+ *rxxri = 0;
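+	/* register for the loopback CT event this test will generate */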
+ evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+ SLI_CT_ELX_LOOPBACK);
+ if (!evt)
+ return ENOMEM;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_add(&evt->node, &phba->ct_ev_waiters);
+ lpfc_bsg_event_ref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ rspiocbq = lpfc_sli_get_iocbq(phba);
+
+ dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (dmabuf) {
+ dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
+ INIT_LIST_HEAD(&dmabuf->list);
+ bpl = (struct ulp_bde64 *) dmabuf->virt;
+ memset(bpl, 0, sizeof(*bpl));
+ ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
+		bpl->addrHigh =
+			cpu_to_le32(putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
+		bpl->addrLow =
+			cpu_to_le32(putPaddrLow(dmabuf->phys + sizeof(*bpl)));
+		bpl->tus.f.bdeFlags = 0;
+		bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+ }
+
+ if (cmdiocbq == NULL || rspiocbq == NULL ||
+ dmabuf == NULL || bpl == NULL || ctreq == NULL) {
+ ret_val = ENOMEM;
+ goto err_get_xri_exit;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ rsp = &rspiocbq->iocb;
+
+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+
+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ctreq->RevisionId.bits.InId = 0;
+ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+ ctreq->FsSubType = 0;
+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
+ ctreq->CommandResponse.bits.Size = 0;
+
+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
+
+ cmd->un.xseq64.w5.hcsw.Fctl = LA;
+ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpContext = rpi;
+
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->vport = phba->pport;
+
+ ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+ rspiocbq,
+ (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+ if (ret_val)
+ goto err_get_xri_exit;
+
+ *txxri = rsp->ulpContext;
+
+ evt->waiting = 1;
+ evt->wait_time_stamp = jiffies;
+ ret_val = wait_event_interruptible_timeout(
+ evt->wq, !list_empty(&evt->events_to_see),
+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+ if (list_empty(&evt->events_to_see))
+ ret_val = (ret_val) ? EINTR : ETIMEDOUT;
+ else {
+ ret_val = IOCB_SUCCESS;
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_move(evt->events_to_see.prev, &evt->events_to_get);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ *rxxri = (list_entry(evt->events_to_get.prev,
+ typeof(struct event_data),
+ node))->immed_dat;
+ }
+ evt->waiting = 0;
+
+err_get_xri_exit:
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt); /* release ref */
+ lpfc_bsg_event_unref(evt); /* delete */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ if (dmabuf) {
+ if (dmabuf->virt)
+ lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+
+ if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ if (rspiocbq)
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+ return ret_val;
+}
+
+/**
+ * diag_cmd_data_alloc - fills in a bde struct with dma buffers
+ * @phba: Pointer to HBA context object
+ * @bpl: Pointer to 64 bit bde structure
+ * @size: Number of bytes to process
+ * @nocopydata: When set, skip zeroing the allocated buffers
+ *
+ * This function allocates page size buffers and populates an lpfc_dmabufext.
+ * Unless @nocopydata is set, each buffer is zeroed before use. The chained
+ * list of page size buffers is returned.
+ **/
+static struct lpfc_dmabufext *
+diag_cmd_data_alloc(struct lpfc_hba *phba,
+ struct ulp_bde64 *bpl, uint32_t size,
+ int nocopydata)
+{
+ struct lpfc_dmabufext *mlist = NULL;
+ struct lpfc_dmabufext *dmp;
+ int cnt, offset = 0, i = 0;
+ struct pci_dev *pcidev;
+
+ pcidev = phba->pcidev;
+
+ while (size) {
+ /* We get chunks of 4K */
+ if (size > BUF_SZ_4K)
+ cnt = BUF_SZ_4K;
+ else
+ cnt = size;
+
+ /* allocate struct lpfc_dmabufext buffer header */
+ dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
+ if (!dmp)
+ goto out;
+
+ INIT_LIST_HEAD(&dmp->dma.list);
+
+ /* Queue it to a linked list */
+ if (mlist)
+ list_add_tail(&dmp->dma.list, &mlist->dma.list);
+ else
+ mlist = dmp;
+
+ /* allocate buffer */
+ dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
+ cnt,
+ &(dmp->dma.phys),
+ GFP_KERNEL);
+
+ if (!dmp->dma.virt)
+ goto out;
+
+ dmp->size = cnt;
+
+ if (nocopydata) {
+ bpl->tus.f.bdeFlags = 0;
+ pci_dma_sync_single_for_device(phba->pcidev,
+ dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
+
+ } else {
+ memset((uint8_t *)dmp->dma.virt, 0, cnt);
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ }
+
+ /* build buffer ptr list for IOCB */
+ bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
+ bpl->tus.f.bdeSize = (ushort) cnt;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+
+ i++;
+ offset += cnt;
+ size -= cnt;
+ }
+
+ mlist->flag = i;
+ return mlist;
+out:
+ diag_cmd_data_free(phba, mlist);
+ return NULL;
+}
+
+/**
+ * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * @phba: Pointer to HBA context object
+ * @rxxri: Receive exchange id
+ * @len: Number of data bytes
+ *
+ * This function allocates and posts a data buffer of sufficient size to
+ * receive an unsolicited CT command.
+ **/
+static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+ size_t len)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *cmdiocbq;
+ IOCB_t *cmd = NULL;
+ struct list_head head, *curr, *next;
+ struct lpfc_dmabuf *rxbmp;
+ struct lpfc_dmabuf *dmp;
+ struct lpfc_dmabuf *mp[2] = {NULL, NULL};
+ struct ulp_bde64 *rxbpl = NULL;
+ uint32_t num_bde;
+ struct lpfc_dmabufext *rxbuffer = NULL;
+ int ret_val = 0;
+ int i = 0;
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (rxbmp != NULL) {
+ rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+ INIT_LIST_HEAD(&rxbmp->list);
+ rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+ rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
+ }
+
+ if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
+ ret_val = ENOMEM;
+ goto err_post_rxbufs_exit;
+ }
+
+ /* Queue buffers for the receive exchange */
+ num_bde = (uint32_t)rxbuffer->flag;
+ dmp = &rxbuffer->dma;
+
+ cmd = &cmdiocbq->iocb;
+ i = 0;
+
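+	/* temporarily splice the buffer chain onto a local head to walk it */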
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &dmp->list);
+ list_for_each_safe(curr, next, &head) {
+ mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
+ list_del(curr);
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
+ cmd->un.quexri64cx.buff.bde.addrHigh =
+ putPaddrHigh(mp[i]->phys);
+ cmd->un.quexri64cx.buff.bde.addrLow =
+ putPaddrLow(mp[i]->phys);
+ cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
+ ((struct lpfc_dmabufext *)mp[i])->size;
+ cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
+ cmd->ulpCommand = CMD_QUE_XRI64_CX;
+ cmd->ulpPU = 0;
+ cmd->ulpLe = 1;
+ cmd->ulpBdeCount = 1;
+ cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
+
+ } else {
+ cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
+ cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
+ cmd->un.cont64[i].tus.f.bdeSize =
+ ((struct lpfc_dmabufext *)mp[i])->size;
+ cmd->ulpBdeCount = ++i;
+
+ if ((--num_bde > 0) && (i < 2))
+ continue;
+
+ cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
+ cmd->ulpLe = 1;
+ }
+
+ cmd->ulpClass = CLASS3;
+ cmd->ulpContext = rxxri;
+
+ ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
+
+ if (ret_val == IOCB_ERROR) {
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext *)mp[0]);
+ if (mp[1])
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext *)mp[1]);
+ dmp = list_entry(next, struct lpfc_dmabuf, list);
+ ret_val = EIO;
+ goto err_post_rxbufs_exit;
+ }
+
+ lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
+ if (mp[1]) {
+ lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
+ mp[1] = NULL;
+ }
+
+ /* The iocb was freed by lpfc_sli_issue_iocb */
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ if (!cmdiocbq) {
+ dmp = list_entry(next, struct lpfc_dmabuf, list);
+ ret_val = EIO;
+ goto err_post_rxbufs_exit;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ i = 0;
+ }
+ list_del(&head);
+
+err_post_rxbufs_exit:
+
+ if (rxbmp) {
+ if (rxbmp->virt)
+ lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+ kfree(rxbmp);
+ }
+
+ if (cmdiocbq)
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return ret_val;
+}
+
+/**
+ * lpfc_bsg_diag_test - with a port in loopback, issue a CT cmd to itself
+ * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
+ *
+ * This function receives a user data buffer to be transmitted and received on
+ * the same port; the link must be up and in loopback mode prior to this call.
+ * 1. A kernel buffer is allocated to copy the user data into.
+ * 2. The port registers with "itself".
+ * 3. The transmit and receive exchange ids are obtained.
+ * 4. The receive exchange id is posted.
+ * 5. A new els loopback event is created.
+ * 6. The command and response iocbs are allocated.
+ * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
+ *
+ * This function may be called repeatedly while the port is in loopback, so it
+ * is the application's responsibility to issue a reset to take the port out
+ * of loopback mode.
+ **/
+static int
+lpfc_bsg_diag_test(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct diag_mode_test *diag_mode;
+ struct lpfc_bsg_event *evt;
+ struct event_data *evdat;
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t size;
+ uint32_t full_size;
+ size_t segment_len = 0, segment_offset = 0, current_offset = 0;
+ uint16_t rpi;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+ IOCB_t *cmd, *rsp;
+ struct lpfc_sli_ct_request *ctreq;
+ struct lpfc_dmabuf *txbmp;
+ struct ulp_bde64 *txbpl = NULL;
+ struct lpfc_dmabufext *txbuffer = NULL;
+ struct list_head head;
+ struct lpfc_dmabuf *curr;
+ uint16_t txxri, rxxri;
+ uint32_t num_bde;
+ uint8_t *ptr = NULL, *rx_databuf = NULL;
+ int rc = 0;
+ unsigned long flags;
+ void *dataout = NULL;
+ uint32_t total_mem;
+
+	/* in case no data is returned, return just the return code */
+ job->reply->reply_payload_rcv_len = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2739 Received DIAG TEST request below minimum "
+ "size\n");
+ rc = -EINVAL;
+ goto loopback_test_exit;
+ }
+
+ if (job->request_payload.payload_len !=
+ job->reply_payload.payload_len) {
+ rc = -EINVAL;
+ goto loopback_test_exit;
+ }
+
+ diag_mode = (struct diag_mode_test *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ if ((phba->link_state == LPFC_HBA_ERROR) ||
+ (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = -EACCES;
+ goto loopback_test_exit;
+ }
+
+ if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ rc = -EACCES;
+ goto loopback_test_exit;
+ }
+
+ size = job->request_payload.payload_len;
+ full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
+
+ if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
+ rc = -ERANGE;
+ goto loopback_test_exit;
+ }
+
+ if (size >= BUF_SZ_4K) {
+		/*
+		 * Allocate memory for ioctl data. If the buffer is bigger than
+		 * 64k, allocate 64k and re-use it over and over to transfer
+		 * the whole block. This is because the Linux kernel has a
+		 * problem allocating more than 120k of kernel space memory.
+		 * Saw problem with GET_FCPTARGETMAPPING...
+		 */
+ if (size <= (64 * 1024))
+ total_mem = size;
+ else
+ total_mem = 64 * 1024;
+ } else
+ /* Allocate memory for ioctl data */
+ total_mem = BUF_SZ_4K;
+
+ dataout = kmalloc(total_mem, GFP_KERNEL);
+ if (dataout == NULL) {
+ rc = -ENOMEM;
+ goto loopback_test_exit;
+ }
+
+ ptr = dataout;
+ ptr += ELX_LOOPBACK_HEADER_SZ;
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ ptr, size);
+
+ rc = lpfcdiag_loop_self_reg(phba, &rpi);
+ if (rc) {
+ rc = -ENOMEM;
+ goto loopback_test_exit;
+ }
+
+ rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ rc = -ENOMEM;
+ goto loopback_test_exit;
+ }
+
+ rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ rc = -ENOMEM;
+ goto loopback_test_exit;
+ }
+
+ evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
+ SLI_CT_ELX_LOOPBACK);
+ if (!evt) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ rc = -ENOMEM;
+ goto loopback_test_exit;
+ }
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_add(&evt->node, &phba->ct_ev_waiters);
+ lpfc_bsg_event_ref(evt);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ cmdiocbq = lpfc_sli_get_iocbq(phba);
+ rspiocbq = lpfc_sli_get_iocbq(phba);
+ txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+
+ if (txbmp) {
+ txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
+ INIT_LIST_HEAD(&txbmp->list);
+ txbpl = (struct ulp_bde64 *) txbmp->virt;
+ if (txbpl)
+ txbuffer = diag_cmd_data_alloc(phba,
+ txbpl, full_size, 0);
+ }
+
+ if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
+ rc = -ENOMEM;
+ goto err_loopback_test_exit;
+ }
+
+ cmd = &cmdiocbq->iocb;
+ rsp = &rspiocbq->iocb;
+
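+	/* copy the CT header and user data across the chained DMA segments */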
+ INIT_LIST_HEAD(&head);
+ list_add_tail(&head, &txbuffer->dma.list);
+ list_for_each_entry(curr, &head, list) {
+ segment_len = ((struct lpfc_dmabufext *)curr)->size;
+ if (current_offset == 0) {
+ ctreq = curr->virt;
+ memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
+ ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
+ ctreq->RevisionId.bits.InId = 0;
+ ctreq->FsType = SLI_CT_ELX_LOOPBACK;
+ ctreq->FsSubType = 0;
+ ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
+ ctreq->CommandResponse.bits.Size = size;
+ segment_offset = ELX_LOOPBACK_HEADER_SZ;
+ } else
+ segment_offset = 0;
+
+ BUG_ON(segment_offset >= segment_len);
+ memcpy(curr->virt + segment_offset,
+ ptr + current_offset,
+ segment_len - segment_offset);
+
+ current_offset += segment_len - segment_offset;
+ BUG_ON(current_offset > size);
+ }
+ list_del(&head);
+
+ /* Build the XMIT_SEQUENCE iocb */
+
+ num_bde = (uint32_t)txbuffer->flag;
+
+ cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
+ cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
+ cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
+
+ cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
+ cmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpContext = txxri;
+
+ cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->vport = phba->pport;
+
+ rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
+ (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
+
+ if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+ rc = -EIO;
+ goto err_loopback_test_exit;
+ }
+
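+	/* wait for the unsolicited CT event carrying the looped-back data */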
+ evt->waiting = 1;
+ rc = wait_event_interruptible_timeout(
+ evt->wq, !list_empty(&evt->events_to_see),
+ ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+ evt->waiting = 0;
+ if (list_empty(&evt->events_to_see))
+ rc = (rc) ? -EINTR : -ETIMEDOUT;
+ else {
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ list_move(evt->events_to_see.prev, &evt->events_to_get);
+ evdat = list_entry(evt->events_to_get.prev,
+ typeof(*evdat), node);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ rx_databuf = evdat->data;
+ if (evdat->len != full_size) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "1603 Loopback test did not receive expected "
+ "data length. actual length 0x%x expected "
+ "length 0x%x\n",
+ evdat->len, full_size);
+ rc = -EIO;
+ } else if (rx_databuf == NULL)
+ rc = -EIO;
+ else {
+ rc = IOCB_SUCCESS;
+ /* skip over elx loopback header */
+ rx_databuf += ELX_LOOPBACK_HEADER_SZ;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ rx_databuf, size);
+ job->reply->reply_payload_rcv_len = size;
+ }
+ }
+
+err_loopback_test_exit:
+ lpfcdiag_loop_self_unreg(phba, rpi);
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ lpfc_bsg_event_unref(evt); /* release ref */
+ lpfc_bsg_event_unref(evt); /* delete */
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+ if (cmdiocbq != NULL)
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+ if (rspiocbq != NULL)
+ lpfc_sli_release_iocbq(phba, rspiocbq);
+
+ if (txbmp != NULL) {
+ if (txbpl != NULL) {
+ if (txbuffer != NULL)
+ diag_cmd_data_free(phba, txbuffer);
+ lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
+ }
+ kfree(txbmp);
+ }
+
+loopback_test_exit:
+ kfree(dataout);
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
+ * @job: GET_DFC_REV fc_bsg_job
+ **/
+static int
+lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ struct get_mgmt_rev *event_req;
+ struct get_mgmt_rev_reply *event_reply;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2740 Received GET_DFC_REV request below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_req = (struct get_mgmt_rev *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+
+ event_reply = (struct get_mgmt_rev_reply *)
+ job->reply->reply_data.vendor_reply.vendor_rsp;
+
+ if (job->reply_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2741 Received GET_DFC_REV reply below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
+ event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
+job_error:
+ job->reply->result = rc;
+ if (rc == 0)
+ job->job_done(job);
+ return rc;
+}
+
+/**
+ * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
+ * @phba: Pointer to HBA context object.
+ * @pmboxq: Pointer to mailbox command.
+ *
+ * This is the completion handler for mailbox commands issued from the
+ * lpfc_bsg_issue_mbox function. It is called by the mailbox event handler
+ * with no lock held. It copies the completed mailbox into the job reply
+ * payload and completes the job.
+ **/
+void
+lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
+{
+ struct bsg_job_data *dd_data;
+ MAILBOX_t *pmb;
+ MAILBOX_t *mb;
+ struct fc_bsg_job *job;
+ uint32_t size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = pmboxq->context1;
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return;
+ }
+
+ pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
+ mb = dd_data->context_un.mbox.mb;
+ job = dd_data->context_un.mbox.set_job;
+ memcpy(mb, pmb, sizeof(*pmb));
+ size = job->request_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ mb, size);
+ job->reply->result = 0;
+ dd_data->context_un.mbox.set_job = NULL;
+ job->dd_data = NULL;
+ job->job_done(job);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
+ kfree(mb);
+ kfree(dd_data);
+ return;
+}
+
+/**
+ * lpfc_bsg_check_cmd_access - test for a supported mailbox command
+ * @phba: Pointer to HBA context object.
+ * @mb: Pointer to a mailbox object.
+ * @vport: Pointer to a vport object.
+ *
+ * Some commands require the port to be offline; others may not be issued
+ * from an application at all.
+ **/
+static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
+ MAILBOX_t *mb, struct lpfc_vport *vport)
+{
+ /* return negative error values for bsg job */
+ switch (mb->mbxCommand) {
+ /* Offline only */
+ case MBX_INIT_LINK:
+ case MBX_DOWN_LINK:
+ case MBX_CONFIG_LINK:
+ case MBX_CONFIG_RING:
+ case MBX_RESET_RING:
+ case MBX_UNREG_LOGIN:
+ case MBX_CLEAR_LA:
+ case MBX_DUMP_CONTEXT:
+ case MBX_RUN_DIAGS:
+ case MBX_RESTART:
+ case MBX_SET_MASK:
+ if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2743 Command 0x%x is illegal in on-line "
+ "state\n",
+ mb->mbxCommand);
+ return -EPERM;
+ }
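+		/* fall through: the remaining commands are allowed on-line or off-line */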
+ case MBX_WRITE_NV:
+ case MBX_WRITE_VPARMS:
+ case MBX_LOAD_SM:
+ case MBX_READ_NV:
+ case MBX_READ_CONFIG:
+ case MBX_READ_RCONFIG:
+ case MBX_READ_STATUS:
+ case MBX_READ_XRI:
+ case MBX_READ_REV:
+ case MBX_READ_LNK_STAT:
+ case MBX_DUMP_MEMORY:
+ case MBX_DOWN_LOAD:
+ case MBX_UPDATE_CFG:
+ case MBX_KILL_BOARD:
+ case MBX_LOAD_AREA:
+ case MBX_LOAD_EXP_ROM:
+ case MBX_BEACON:
+ case MBX_DEL_LD_ENTRY:
+ case MBX_SET_DEBUG:
+ case MBX_WRITE_WWN:
+ case MBX_SLI4_CONFIG:
+ case MBX_READ_EVENT_LOG_STATUS:
+ case MBX_WRITE_EVENT_LOG:
+ case MBX_PORT_CAPABILITIES:
+ case MBX_PORT_IOV_CONTROL:
+ break;
+ case MBX_SET_VARIABLE:
+ case MBX_RUN_BIU_DIAG64:
+ case MBX_READ_EVENT_LOG:
+ case MBX_READ_SPARM64:
+ case MBX_READ_LA:
+ case MBX_READ_LA64:
+ case MBX_REG_LOGIN:
+ case MBX_REG_LOGIN64:
+ case MBX_CONFIG_PORT:
+ case MBX_RUN_BIU_DIAG:
+ default:
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2742 Unknown Command 0x%x\n",
+ mb->mbxCommand);
+ return -EPERM;
+ }
+
+ return 0; /* ok */
+}
+
+/**
+ * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
+ * @phba: Pointer to HBA context object.
+ * @job: Pointer to the fc_bsg_job carrying the mailbox command.
+ * @vport: Pointer to a vport object.
+ *
+ * Allocate a tracking object and mailbox command memory, get a mailbox
+ * from the mailbox pool, and copy in the caller's mailbox command.
+ *
+ * If the port is offline or the SLI is not active, poll for the command to
+ * complete (the port may be being reset) and finish the job here; otherwise
+ * issue the mailbox command and let the completion handler finish the job.
+ **/
+static uint32_t
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+ struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ MAILBOX_t *mb;
+ struct bsg_job_data *dd_data;
+ uint32_t size;
+ int rc = 0;
+
+ /* allocate our bsg tracking structure */
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2727 Failed allocation of dd_data\n");
+ return -ENOMEM;
+ }
+
+ mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!mb) {
+ kfree(dd_data);
+ return -ENOMEM;
+ }
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq) {
+ kfree(dd_data);
+ kfree(mb);
+ return -ENOMEM;
+ }
+
+ size = job->request_payload.payload_len;
+ job->reply->reply_payload_rcv_len =
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mb, size);
+
+ rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
+ if (rc != 0) {
+ kfree(dd_data);
+ kfree(mb);
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return rc; /* must be negative */
+ }
+
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb = &pmboxq->u.mb;
+ memcpy(pmb, mb, sizeof(*pmb));
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+ pmboxq->vport = vport;
+
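+	/* port off-line or SLI inactive: poll for completion and finish here */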
+ if ((vport->fc_flag & FC_OFFLINE_MODE) ||
+ (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ if (rc != MBX_TIMEOUT) {
+ kfree(dd_data);
+ kfree(mb);
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ }
+ return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+ }
+
+ memcpy(mb, pmb, sizeof(*pmb));
+ job->reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ mb, size);
+ kfree(dd_data);
+ kfree(mb);
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+		/* not waiting; mbox already done */
+ return 0;
+ }
+
+ /* setup wake call as IOCB callback */
+ pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+ /* setup context field to pass wait_queue pointer to wake function */
+ pmboxq->context1 = dd_data;
+ dd_data->type = TYPE_MBOX;
+ dd_data->context_un.mbox.pmboxq = pmboxq;
+ dd_data->context_un.mbox.mb = mb;
+ dd_data->context_un.mbox.set_job = job;
+ job->dd_data = dd_data;
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+ if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+ kfree(dd_data);
+ kfree(mb);
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ return 1;
+}
+
+/**
+ * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
+ * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
+ **/
+static int
+lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int rc = 0;
+
+ /* in case no data is transferred */
+ job->reply->reply_payload_rcv_len = 0;
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "2737 Received MBOX_REQ request below "
+ "minimum size\n");
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ if (job->request_payload.payload_len != PAGE_SIZE) {
+ rc = -EINVAL;
+ goto job_error;
+ }
+
+ if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
+ rc = -EAGAIN;
+ goto job_error;
+ }
+
+ rc = lpfc_bsg_issue_mbox(phba, job, vport);
+
+job_error:
+ if (rc == 0) {
+ /* job done */
+ job->reply->result = 0;
+ job->dd_data = NULL;
+ job->job_done(job);
+ } else if (rc == 1)
+		/* job submitted, will complete later */
+ rc = 0; /* return zero, no error */
+ else {
+ /* some error occurred */
+ job->reply->result = rc;
+ job->dd_data = NULL;
+ }
return rc;
}
@@ -834,38 +2640,57 @@ error_get_event_exit:
/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
- */
+ **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ int rc;
switch (command) {
case LPFC_BSG_VENDOR_SET_CT_EVENT:
- return lpfc_bsg_set_event(job);
+ rc = lpfc_bsg_hba_set_event(job);
break;
-
case LPFC_BSG_VENDOR_GET_CT_EVENT:
- return lpfc_bsg_get_event(job);
+ rc = lpfc_bsg_hba_get_event(job);
+ break;
+ case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
+ rc = lpfc_bsg_send_mgmt_rsp(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_MODE:
+ rc = lpfc_bsg_diag_mode(job);
+ break;
+ case LPFC_BSG_VENDOR_DIAG_TEST:
+ rc = lpfc_bsg_diag_test(job);
+ break;
+ case LPFC_BSG_VENDOR_GET_MGMT_REV:
+ rc = lpfc_bsg_get_dfc_rev(job);
+ break;
+ case LPFC_BSG_VENDOR_MBOX:
+ rc = lpfc_bsg_mbox_cmd(job);
break;
-
default:
- return -EINVAL;
+ rc = -EINVAL;
+ job->reply->reply_payload_rcv_len = 0;
+ /* make error code available to userspace */
+ job->reply->result = rc;
+ break;
}
+
+ return rc;
}
/**
* lpfc_bsg_request - handle a bsg request from the FC transport
* @job: fc_bsg_job to handle
- */
+ **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
uint32_t msgcode;
- int rc = -EINVAL;
+ int rc;
msgcode = job->request->msgcode;
-
switch (msgcode) {
case FC_BSG_HST_VENDOR:
rc = lpfc_bsg_hst_vendor(job);
@@ -874,9 +2699,13 @@ lpfc_bsg_request(struct fc_bsg_job *job)
rc = lpfc_bsg_rport_els(job);
break;
case FC_BSG_RPT_CT:
- rc = lpfc_bsg_rport_ct(job);
+ rc = lpfc_bsg_send_mgmt_cmd(job);
break;
default:
+ rc = -EINVAL;
+ job->reply->reply_payload_rcv_len = 0;
+ /* make error code available to userspace */
+ job->reply->result = rc;
break;
}
@@ -889,17 +2718,71 @@ lpfc_bsg_request(struct fc_bsg_job *job)
*
* This function just aborts the job's IOCB. The aborted IOCB will return to
* the waiting function which will handle passing the error back to userspace
- */
+ **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
+ struct lpfc_iocbq *cmdiocb;
+ struct lpfc_bsg_event *evt;
+ struct lpfc_bsg_iocb *iocb;
+ struct lpfc_bsg_mbox *mbox;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct bsg_job_data *dd_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->ct_ev_lock, flags);
+ dd_data = (struct bsg_job_data *)job->dd_data;
+ /* timeout and completion crossed paths if no dd_data */
+ if (!dd_data) {
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ return 0;
+ }
- if (cmdiocb)
+ switch (dd_data->type) {
+ case TYPE_IOCB:
+ iocb = &dd_data->context_un.iocb;
+ cmdiocb = iocb->cmdiocbq;
+ /* hint to completion handler that the job timed out */
+ job->reply->result = -EAGAIN;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ /* this will call our completion handler */
+ spin_lock_irq(&phba->hbalock);
lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ case TYPE_EVT:
+ evt = dd_data->context_un.evt;
+ /* this event has no job anymore */
+ evt->set_job = NULL;
+ job->dd_data = NULL;
+ job->reply->reply_payload_rcv_len = 0;
+		/* Return -EAGAIN which is our way of signalling the
+ * app to retry.
+ */
+ job->reply->result = -EAGAIN;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job->job_done(job);
+ break;
+ case TYPE_MBOX:
+ mbox = &dd_data->context_un.mbox;
+ /* this mbox has no job anymore */
+ mbox->set_job = NULL;
+ job->dd_data = NULL;
+ job->reply->reply_payload_rcv_len = 0;
+ job->reply->result = -EAGAIN;
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ job->job_done(job);
+ break;
+ default:
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+ break;
+ }
+	/* The scsi transport fc function fc_bsg_job_timeout expects a zero
+	 * return code, otherwise an error message will be displayed on the
+	 * console, so always return success (zero).
+	 */
return 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
new file mode 100644
index 000000000000..6c8f87e39b98
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -0,0 +1,98 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2010 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+/* bsg definitions
+ * No pointers to user data are allowed; all application buffers and sizes
+ * are derived through the bsg interface.
+ *
+ * These are the vendor unique structures passed in using the bsg
+ * FC_BSG_HST_VENDOR message code type.
+ */
+#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
+#define LPFC_BSG_VENDOR_SEND_MGMT_RESP 3
+#define LPFC_BSG_VENDOR_DIAG_MODE 4
+#define LPFC_BSG_VENDOR_DIAG_TEST 5
+#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
+#define LPFC_BSG_VENDOR_MBOX 7
+
+struct set_ct_event {
+ uint32_t command;
+ uint32_t type_mask;
+ uint32_t ev_req_id;
+ uint32_t ev_reg_id;
+};
+
+struct get_ct_event {
+ uint32_t command;
+ uint32_t ev_reg_id;
+ uint32_t ev_req_id;
+};
+
+struct get_ct_event_reply {
+ uint32_t immed_data;
+ uint32_t type;
+};
+
+struct send_mgmt_resp {
+ uint32_t command;
+ uint32_t tag;
+};
+
+#define INTERNAL_LOOP_BACK 0x1 /* adapter loops the frame back internally */
+#define EXTERNAL_LOOP_BACK 0x2 /* requires an external loopback plug */
+
+struct diag_mode_set {
+ uint32_t command;
+ uint32_t type;
+ uint32_t timeout;
+};
+
+struct diag_mode_test {
+ uint32_t command;
+};
+
+#define LPFC_WWNN_TYPE 0
+#define LPFC_WWPN_TYPE 1
+
+struct get_mgmt_rev {
+ uint32_t command;
+};
+
+#define MANAGEMENT_MAJOR_REV 1
+#define MANAGEMENT_MINOR_REV 0
+
+/* the MgmtRevInfo structure */
+struct MgmtRevInfo {
+ uint32_t a_Major;
+ uint32_t a_Minor;
+};
+
+struct get_mgmt_rev_reply {
+ struct MgmtRevInfo info;
+};
+
+struct dfc_mbox_req {
+ uint32_t command;
+ uint32_t inExtWLen;
+ uint32_t outExtWLen;
+ uint8_t mbOffset;
+};
+
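For orientation: a userspace application drives these vendor commands through the FC transport's bsg node using the sg_io_v4 interface from <linux/bsg.h>. Below is a minimal sketch, not driver code, for issuing LPFC_BSG_VENDOR_GET_MGMT_REV; it assumes the <scsi/scsi_bsg_fc.h> request/reply definitions are visible to userspace and that the host's node is /dev/bsg/fc_host0 (the node name, the response-buffer sizing, and the bare-bones error handling are assumptions, not something this patch defines).

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_SCSI */
	#include <scsi/sg.h>            /* SG_IO */
	#include <scsi/scsi_bsg_fc.h>   /* fc_bsg_request/_reply, FC_BSG_HST_VENDOR */

	#define LPFC_BSG_VENDOR_GET_MGMT_REV 6  /* mirrors the define above */

	int main(void)
	{
		/* room for the request header plus one vendor_cmd word */
		unsigned char rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
		unsigned char resp[64];  /* fc_bsg_reply plus vendor_rsp words */
		struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
		struct fc_bsg_reply *rep = (struct fc_bsg_reply *)resp;
		struct sg_io_v4 io;
		int fd = open("/dev/bsg/fc_host0", O_RDWR);  /* hypothetical node */

		if (fd < 0)
			return 1;

		memset(rqst, 0, sizeof(rqst));
		req->msgcode = FC_BSG_HST_VENDOR;
		req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;

		memset(resp, 0, sizeof(resp));
		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request_len = sizeof(rqst);
		io.request = (uintptr_t)rqst;
		io.max_response_len = sizeof(resp);
		io.response = (uintptr_t)resp;

		/* the driver fills a_Major/a_Minor into the vendor reply words */
		if (ioctl(fd, SG_IO, &io) == 0 && rep->result == 0)
			printf("DFC management rev %u.%u\n",
			       rep->reply_data.vendor_reply.vendor_rsp[0],
			       rep->reply_data.vendor_reply.vendor_rsp[1]);
		return 0;
	}

The same sg_io_v4 pattern, with dout_xferp/din_xferp carrying the data payload, applies to the DIAG_TEST and MBOX vendor commands handled earlier in this patch.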
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 650494d622c1..6f0fb51eb461 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -44,18 +44,26 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
+void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
+ struct lpfc_nodelist *);
void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_supported_pages(struct lpfcMboxq *);
+void lpfc_sli4_params(struct lpfcMboxq *);
+int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
void lpfc_cleanup_rpis(struct lpfc_vport *, int);
+void lpfc_cleanup_pending_mbox(struct lpfc_vport *);
int lpfc_linkdown(struct lpfc_hba *);
void lpfc_linkdown_port(struct lpfc_vport *);
void lpfc_port_link_failure(struct lpfc_vport *);
void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_retry_pport_discovery(struct lpfc_hba *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -73,6 +81,7 @@ void lpfc_set_disctmo(struct lpfc_vport *);
int lpfc_can_disctmo(struct lpfc_vport *);
int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_unreg_all_rpis(struct lpfc_vport *);
+void lpfc_unreg_hba_rpis(struct lpfc_hba *);
void lpfc_unreg_default_rpis(struct lpfc_vport *);
void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
@@ -99,7 +108,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
- struct serv_parm *, uint32_t);
+ struct serv_parm *, uint32_t, int);
int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
void lpfc_more_plogi(struct lpfc_vport *);
void lpfc_more_adisc(struct lpfc_vport *);
@@ -197,6 +206,7 @@ void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
+void lpfc_issue_init_vpi(struct lpfc_vport *);
void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
uint32_t , LPFC_MBOXQ_t *);
@@ -206,7 +216,11 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
+void lpfc_unregister_fcf(struct lpfc_hba *);
+void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
void lpfc_unregister_unused_fcf(struct lpfc_hba *);
+int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
+void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@@ -365,6 +379,8 @@ void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
void lpfc_create_static_vport(struct lpfc_hba *);
void lpfc_stop_hba_timers(struct lpfc_hba *);
void lpfc_stop_port(struct lpfc_hba *);
+void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
+void lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *);
void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
@@ -378,5 +394,5 @@ struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
/* functions to support SGIOv4/bsg interface */
int lpfc_bsg_request(struct fc_bsg_job *);
int lpfc_bsg_timeout(struct fc_bsg_job *);
-void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0ebcd9baca79..c7e921973f66 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -97,7 +97,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct list_head head;
struct lpfc_dmabuf *bdeBuf;
- lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
+ if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+ return;
if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
@@ -181,7 +182,8 @@ lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
uint32_t size;
/* Forward abort event to any process registered to receive ct event */
- lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
+ if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
+ return;
/* If there is no BDE associated with IOCB, there is nothing to do */
if (icmd->ulpBdeCount == 0)
@@ -1843,12 +1845,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
c = (rev & 0x0000ff00) >> 8;
b4 = (rev & 0x000000ff);
- if (flag)
- sprintf(fwrevision, "%d.%d%d%c%d ", b1,
- b2, b3, c, b4);
- else
- sprintf(fwrevision, "%d.%d%d%c%d ", b1,
- b2, b3, c, b4);
+ sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
}
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2cc39684ce97..08b6634cb994 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -50,9 +50,6 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb);
-static void lpfc_register_new_vport(struct lpfc_hba *phba,
- struct lpfc_vport *vport,
- struct lpfc_nodelist *ndlp);
static int lpfc_max_els_tries = 3;
@@ -592,6 +589,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
}
+	/*
+	 * If the VPI has been unregistered, the driver needs to issue
+	 * INIT_VPI before re-registering.
+	 */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
}
if (phba->sli_rev < LPFC_SLI_REV4) {
@@ -604,10 +610,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
} else {
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- if (vport->vpi_state & LPFC_VPI_REGISTERED) {
+ if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
+ (vport->vpi_state & LPFC_VPI_REGISTERED)) {
lpfc_start_fdiscs(phba);
lpfc_do_scr_ns_plogi(phba, vport);
- } else
+ } else if (vport->fc_flag & FC_VFI_REGISTERED)
+ lpfc_issue_init_vpi(vport);
+ else
lpfc_issue_reg_vfi(vport);
}
return 0;
@@ -804,6 +813,9 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpTimeout);
goto flogifail;
}
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
+ spin_unlock_irq(shost->host_lock);
/*
* The FLogI succeeded. Sync the data for the CPU before
@@ -2720,7 +2732,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (did == FDMI_DID)
retry = 1;
- if ((cmd == ELS_CMD_FLOGI) &&
+ if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
(phba->fc_topology != TOPOLOGY_LOOP) &&
!lpfc_error_lost_link(irsp)) {
/* FLOGI retry policy */
@@ -4385,7 +4397,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
did = Fabric_DID;
- if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
/* For a FLOGI we accept, then if our portname is greater
* then the remote portname we initiate Nport login.
*/
@@ -5915,6 +5927,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
MAILBOX_t *mb = &pmb->u.mb;
+ int rc;
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5936,6 +5949,26 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
lpfc_can_disctmo(vport);
break;
+		/* If REG_VPI fails with an invalid VPI status, re-init the VPI */
+ case 0x20:
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_init_vpi(phba, pmb, vport->vpi);
+ pmb->vport = vport;
+ pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb,
+ MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_vlog(vport,
+ KERN_ERR, LOG_MBOX,
+ "2732 Failed to issue INIT_VPI"
+ " mailbox command\n");
+ } else {
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+
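+		/* fall through to the recovery path if INIT_VPI could not be issued */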
default:
/* Try to recover from this error */
lpfc_mbx_unreg_vpi(vport);
@@ -5949,13 +5982,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
break;
}
} else {
+ spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
- if (vport == phba->pport)
+ spin_unlock_irq(shost->host_lock);
+ if (vport == phba->pport) {
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
- else
- lpfc_issue_reg_vfi(vport);
- else
+ else {
+ lpfc_start_fdiscs(phba);
+ lpfc_do_scr_ns_plogi(phba, vport);
+ }
+ } else
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -5977,7 +6014,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* This routine registers the @vport as a new virtual port with a HBA.
* It is done through a registering vpi mailbox command.
**/
-static void
+void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp)
{
@@ -6018,6 +6055,78 @@ mbox_err_exit:
}
/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all pending discovery commands and starts a timer
+ * to retry FLOGI for the physical port discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ int i;
+ uint32_t link_state;
+
+ /* Treat this failure as linkdown for all vports */
+ link_state = phba->link_state;
+ lpfc_linkdown(phba);
+ phba->link_state = link_state;
+
+ vports = lpfc_create_vport_work_array(phba);
+
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+ if (ndlp)
+ lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
+ lpfc_els_flush_cmd(vports[i]);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+
+	/* If the fabric requires FLOGI, re-instantiate the physical login */
+ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+ if (!ndlp)
+ return;
+
+ shost = lpfc_shost_from_vport(phba->pport);
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(shost->host_lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
+ phba->pport->port_state = LPFC_FLOGI;
+ return;
+}
+
+/**
+ * lpfc_fabric_login_reqd - Check if FLOGI required.
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to FDISC command iocb.
+ * @rspiocb: pointer to FDISC response iocb.
+ *
+ * This routine checks if a FLOGI is required for FDISC to succeed.
+ **/
+static int
+lpfc_fabric_login_reqd(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
+ (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+ return 0;
+ else
+ return 1;
+}
+
+/**
* lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
* @phba: pointer to lpfc hba data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
@@ -6066,6 +6175,12 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
if (irsp->ulpStatus) {
+
+ if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
+ lpfc_retry_pport_discovery(phba);
+ goto out;
+ }
+
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
@@ -6076,6 +6191,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto fdisc_failed;
}
spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
vport->fc_flag |= FC_FABRIC;
if (vport->phba->fc_topology == TOPOLOGY_LOOP)
vport->fc_flag |= FC_PUBLIC_LOOP;
@@ -6103,10 +6219,13 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
- if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+ if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
+ lpfc_issue_init_vpi(vport);
+ else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
lpfc_register_new_vport(phba, vport, ndlp);
else
lpfc_do_scr_ns_plogi(phba, vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2445e399fd60..2359d0bfb734 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -525,6 +525,8 @@ lpfc_work_done(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
}
+ if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
+ lpfc_sli4_fcf_redisc_event_proc(phba);
}
vports = lpfc_create_vport_work_array(phba);
@@ -706,6 +708,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
+ lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+
/* Cleanup any outstanding received buffers */
lpfc_cleanup_rcv_buffers(vport);
@@ -752,12 +756,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_scsi_dev_block(phba);
spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+ spin_unlock_irq(&phba->hbalock);
if (phba->link_state > LPFC_LINK_DOWN) {
phba->link_state = LPFC_LINK_DOWN;
+ spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
}
- spin_unlock_irq(&phba->hbalock);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
@@ -1023,7 +1029,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
return;
}
spin_lock_irqsave(&phba->hbalock, flags);
- phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+ phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
if (vport->port_state != LPFC_FLOGI)
@@ -1045,25 +1051,23 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
- if ((fab_name[0] ==
- bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
- (fab_name[1] ==
- bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
- (fab_name[2] ==
- bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
- (fab_name[3] ==
- bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
- (fab_name[4] ==
- bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
- (fab_name[5] ==
- bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
- (fab_name[6] ==
- bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
- (fab_name[7] ==
- bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
- return 1;
- else
+ if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
+ return 0;
+ if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
+ return 0;
+ if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
return 0;
+ if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
+ return 0;
+ if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
+ return 0;
+ if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
+ return 0;
+ if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
+ return 0;
+ if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
+ return 0;
+ return 1;
}
/**
@@ -1078,30 +1082,28 @@ lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
- if ((sw_name[0] ==
- bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
- (sw_name[1] ==
- bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
- (sw_name[2] ==
- bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
- (sw_name[3] ==
- bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
- (sw_name[4] ==
- bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
- (sw_name[5] ==
- bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
- (sw_name[6] ==
- bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
- (sw_name[7] ==
- bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
- return 1;
- else
+ if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
return 0;
+ if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
+ return 0;
+ if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
+ return 0;
+ if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
+ return 0;
+ if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
+ return 0;
+ if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
+ return 0;
+ if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
+ return 0;
+ if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
+ return 0;
+ return 1;
}
/**
* lpfc_mac_addr_match - Check if the fcf mac address match.
- * @phba: pointer to lpfc hba data structure.
+ * @mac_addr: pointer to mac address.
* @new_fcf_record: pointer to fcf record.
*
* This routine compare the fcf record's mac address with HBA's
@@ -1109,85 +1111,115 @@ lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
* returns 1 else return 0.
**/
static uint32_t
-lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
- if ((phba->fcf.mac_addr[0] ==
- bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
- (phba->fcf.mac_addr[1] ==
- bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
- (phba->fcf.mac_addr[2] ==
- bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
- (phba->fcf.mac_addr[3] ==
- bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
- (phba->fcf.mac_addr[4] ==
- bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
- (phba->fcf.mac_addr[5] ==
- bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
- return 1;
- else
+ if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
+ return 0;
+ if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
+ return 0;
+ if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
return 0;
+ if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
+ return 0;
+ if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
+ return 0;
+ if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
+ return 0;
+ return 1;
+}
+
+static bool
+lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
+{
+ return (curr_vlan_id == new_vlan_id);
}
/**
* lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
- * @phba: pointer to lpfc hba data structure.
+ * @fcf: pointer to driver fcf record.
* @new_fcf_record: pointer to fcf record.
*
* This routine copies the FCF information from the FCF
* record to lpfc_hba data structure.
**/
static void
-lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
+ struct fcf_record *new_fcf_record)
{
- phba->fcf.fabric_name[0] =
+ /* Fabric name */
+ fcf_rec->fabric_name[0] =
bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
- phba->fcf.fabric_name[1] =
+ fcf_rec->fabric_name[1] =
bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
- phba->fcf.fabric_name[2] =
+ fcf_rec->fabric_name[2] =
bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
- phba->fcf.fabric_name[3] =
+ fcf_rec->fabric_name[3] =
bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
- phba->fcf.fabric_name[4] =
+ fcf_rec->fabric_name[4] =
bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
- phba->fcf.fabric_name[5] =
+ fcf_rec->fabric_name[5] =
bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
- phba->fcf.fabric_name[6] =
+ fcf_rec->fabric_name[6] =
bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
- phba->fcf.fabric_name[7] =
+ fcf_rec->fabric_name[7] =
bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
- phba->fcf.mac_addr[0] =
- bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
- phba->fcf.mac_addr[1] =
- bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
- phba->fcf.mac_addr[2] =
- bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
- phba->fcf.mac_addr[3] =
- bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
- phba->fcf.mac_addr[4] =
- bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
- phba->fcf.mac_addr[5] =
- bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
- phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
- phba->fcf.priority = new_fcf_record->fip_priority;
- phba->fcf.switch_name[0] =
+ /* Mac address */
+ fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+ fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+ fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+ fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+ fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+ fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+ /* FCF record index */
+ fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ /* FCF record priority */
+ fcf_rec->priority = new_fcf_record->fip_priority;
+ /* Switch name */
+ fcf_rec->switch_name[0] =
bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
- phba->fcf.switch_name[1] =
+ fcf_rec->switch_name[1] =
bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
- phba->fcf.switch_name[2] =
+ fcf_rec->switch_name[2] =
bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
- phba->fcf.switch_name[3] =
+ fcf_rec->switch_name[3] =
bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
- phba->fcf.switch_name[4] =
+ fcf_rec->switch_name[4] =
bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
- phba->fcf.switch_name[5] =
+ fcf_rec->switch_name[5] =
bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
- phba->fcf.switch_name[6] =
+ fcf_rec->switch_name[6] =
bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
- phba->fcf.switch_name[7] =
+ fcf_rec->switch_name[7] =
bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
+ * lpfc_update_fcf_record - Update driver fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_rec: pointer to driver fcf record.
+ * @new_fcf_record: pointer to hba fcf record.
+ * @addr_mode: address mode to be set to the driver fcf record.
+ * @vlan_id: vlan tag to be set to the driver fcf record.
+ * @flag: flag bits to be set to the driver fcf record.
+ *
+ * This routine updates the driver FCF record from the new HBA FCF record
+ * together with the address mode, vlan_id, and other information. This
+ * routine is called with the hbalock held.
+ **/
+static void
+__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
+ struct fcf_record *new_fcf_record, uint32_t addr_mode,
+ uint16_t vlan_id, uint32_t flag)
+{
+ /* Copy the fields from the HBA's FCF record */
+ lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
+ /* Update other fields of driver FCF record */
+ fcf_rec->addr_mode = addr_mode;
+ fcf_rec->vlan_id = vlan_id;
+ fcf_rec->flag |= (flag | RECORD_VALID);
+}
+
+/**
* lpfc_register_fcf - Register the FCF with hba.
* @phba: pointer to lpfc hba data structure.
*
@@ -1212,7 +1244,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
/* The FCF is already registered, start discovery */
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
- phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+ phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->pport->port_state != LPFC_FLOGI)
@@ -1250,6 +1282,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
* @new_fcf_record: pointer to fcf record.
* @boot_flag: Indicates if this record used by boot bios.
* @addr_mode: The address mode to be used by this FCF
+ * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
*
* This routine compare the fcf record with connect list obtained from the
* config region to decide if this FCF can be used for SAN discovery. It returns
@@ -1323,7 +1356,8 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
return 1;
}
- list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
+ list_for_each_entry(conn_entry,
+ &phba->fcf_conn_rec_list, list) {
if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
continue;
@@ -1470,6 +1504,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
*/
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
}
@@ -1524,11 +1559,12 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
struct fcf_record *new_fcf_record;
- int rc;
uint32_t boot_flag, addr_mode;
uint32_t next_fcf_index;
- unsigned long flags;
+ struct lpfc_fcf_rec *fcf_rec = NULL;
+ unsigned long iflags;
uint16_t vlan_id;
+ int rc;
/* If there is pending FCoE event restart FCF table scan */
if (lpfc_check_pending_fcoe_event(phba, 0)) {
@@ -1583,9 +1619,8 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sizeof(struct fcf_record));
bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
- rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
- &boot_flag, &addr_mode,
- &vlan_id);
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
/*
* If the fcf record does not match with connect list entries
* read the next entry.
@@ -1594,90 +1629,159 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto read_next_fcf;
/*
* If this is not the first FCF discovery of the HBA, use last
- * FCF record for the discovery.
+	 * FCF record for the discovery. A rescan matches the in-use FCF
+	 * record when the fabric name, switch name, mac address, and
+	 * vlan_id all match.
*/
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
- if (lpfc_fab_name_match(phba->fcf.fabric_name,
+ if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
new_fcf_record) &&
- lpfc_sw_name_match(phba->fcf.switch_name,
+ lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
new_fcf_record) &&
- lpfc_mac_addr_match(phba, new_fcf_record)) {
+ lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
+ new_fcf_record) &&
+ lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
+ vlan_id)) {
phba->fcf.fcf_flag |= FCF_AVAILABLE;
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+ /* Stop FCF redisc wait timer if pending */
+ __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+ else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+ /* If in fast failover, mark it as completed */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
- goto read_next_fcf;
+ /*
+ * Read the next FCF record from the HBA, searching for a
+ * match with the in-use record, only when not in the fast
+ * failover period. During fast failover, instead determine
+ * whether the FCF record just read should be the next
+ * failover candidate.
+ */
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ goto read_next_fcf;
+ }
}
+ /*
+ * Update the failover FCF record only during the FCF fast-failover
+ * period; otherwise, update the current FCF record.
+ */
+ if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
+ /* Fast FCF failover only to the same fabric name */
+ if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
+ new_fcf_record))
+ fcf_rec = &phba->fcf.failover_rec;
+ else
+ goto read_next_fcf;
+ } else
+ fcf_rec = &phba->fcf.current_rec;
+
if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
/*
- * If the current FCF record does not have boot flag
- * set and new fcf record has boot flag set, use the
- * new fcf record.
+ * If the driver FCF record does not have boot flag
+ * set and new hba fcf record has boot flag set, use
+ * the new hba fcf record.
*/
- if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
- /* Use this FCF record */
- lpfc_copy_fcf_record(phba, new_fcf_record);
- phba->fcf.addr_mode = addr_mode;
- phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
- if (vlan_id != 0xFFFF) {
- phba->fcf.fcf_flag |= FCF_VALID_VLAN;
- phba->fcf.vlan_id = vlan_id;
- }
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
+ /* Choose this FCF record */
+ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+ addr_mode, vlan_id, BOOT_ENABLE);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto read_next_fcf;
}
/*
- * If the current FCF record has boot flag set and the
- * new FCF record does not have boot flag, read the next
- * FCF record.
+ * If the driver FCF record has boot flag set and the
+ * new hba FCF record does not have boot flag, read
+ * the next FCF record.
*/
- if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto read_next_fcf;
}
/*
- * If there is a record with lower priority value for
- * the current FCF, use that record.
+ * If the new hba FCF record has lower priority value
+ * than the driver FCF record, use the new record.
*/
- if (lpfc_fab_name_match(phba->fcf.fabric_name,
- new_fcf_record) &&
- (new_fcf_record->fip_priority < phba->fcf.priority)) {
- /* Use this FCF record */
- lpfc_copy_fcf_record(phba, new_fcf_record);
- phba->fcf.addr_mode = addr_mode;
- if (vlan_id != 0xFFFF) {
- phba->fcf.fcf_flag |= FCF_VALID_VLAN;
- phba->fcf.vlan_id = vlan_id;
- }
- spin_unlock_irqrestore(&phba->hbalock, flags);
- goto read_next_fcf;
+ if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
+ (new_fcf_record->fip_priority < fcf_rec->priority)) {
+ /* Choose this FCF record */
+ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+ addr_mode, vlan_id, 0);
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto read_next_fcf;
}
/*
- * This is the first available FCF record, use this
- * record.
+ * This is the first suitable FCF record; choose it as the
+ * initial best-fit FCF.
*/
- lpfc_copy_fcf_record(phba, new_fcf_record);
- phba->fcf.addr_mode = addr_mode;
- if (boot_flag)
- phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
- phba->fcf.fcf_flag |= FCF_AVAILABLE;
- if (vlan_id != 0xFFFF) {
- phba->fcf.fcf_flag |= FCF_VALID_VLAN;
- phba->fcf.vlan_id = vlan_id;
+ if (fcf_rec) {
+ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
+ addr_mode, vlan_id, (boot_flag ?
+ BOOT_ENABLE : 0));
+ phba->fcf.fcf_flag |= FCF_AVAILABLE;
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto read_next_fcf;
read_next_fcf:
lpfc_sli4_mbox_cmd_free(phba, mboxq);
- if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
- lpfc_register_fcf(phba);
- else
+ if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
+ if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
+ /*
+ * Case of FCF fast failover scan
+ */
+
+ /*
+ * No suitable FCF record has been found: cancel the
+ * in-progress FCF scan and do nothing
+ */
+ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return;
+ }
+ /*
+ * A suitable FCF record that differs from the in-use
+ * record has been found: unregister the in-use FCF
+ * record, replace it with the new FCF record, mark
+ * FCF fast failover completed, and then register the
+ * new FCF record.
+ */
+
+ /* unregister the current in-use FCF record */
+ lpfc_unregister_fcf(phba);
+ /* replace in-use record with the new record */
+ memcpy(&phba->fcf.current_rec,
+ &phba->fcf.failover_rec,
+ sizeof(struct lpfc_fcf_rec));
+ /* mark the FCF fast failover completed */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ /* Register to the new FCF record */
+ lpfc_register_fcf(phba);
+ } else {
+ /*
+ * During the transition period to fast FCF failover,
+ * do nothing when the scan reaches the end of the FCF table.
+ */
+ if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
+ (phba->fcf.fcf_flag & FCF_REDISC_PEND))
+ return;
+ /*
+ * Otherwise (initial scan or post-linkdown rescan),
+ * register the best-fit FCF record found so far
+ * through the scanning process.
+ */
+ lpfc_register_fcf(phba);
+ }
+ } else
lpfc_sli4_read_fcf_record(phba, next_fcf_index);
return;
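
The selection rules implemented above reduce to a fixed precedence: a boot-BIOS-enabled record always beats a non-boot record, and between records of the same boot class a lower FIP priority wins, provided the fabric name matches. A minimal sketch of that decision, using hypothetical stand-ins (cand_*, cur_*) rather than the driver's structures:

	/* Sketch of the record-preference rule above; not driver code. */
	static int fcf_prefer_candidate(int cand_boot, int cur_boot,
					unsigned char cand_prio,
					unsigned char cur_prio,
					int fab_name_match)
	{
		if (cand_boot && !cur_boot)
			return 1;	/* boot-enabled record always wins */
		if (!cand_boot && cur_boot)
			return 0;	/* never displace a boot-enabled record */
		/* same boot class: lower FIP priority wins, same fabric only */
		return fab_name_match && (cand_prio < cur_prio);
	}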
@@ -1695,10 +1799,13 @@ out:
*
* This function handles completion of init vpi mailbox command.
*/
-static void
+void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
@@ -1708,9 +1815,23 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(shost->host_lock);
+
+ /* If this port is physical port or FDISC is done, do reg_vpi */
+ if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp)
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_DISCOVERY,
+ "2731 Cannot find fabric "
+ "controller node\n");
+ else
+ lpfc_register_new_vport(phba, vport, ndlp);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return;
+ }
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vport);
@@ -1719,10 +1840,42 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"2606 No NPIV Fabric support\n");
}
+ mempool_free(mboxq, phba->mbox_mem_pool);
return;
}
/**
+ * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
+ * @vport: pointer to lpfc_vport data structure.
+ *
+ * This function issues an init_vpi mailbox command to initialize
+ * the VPI for the vport.
+ */
+void
+lpfc_issue_init_vpi(struct lpfc_vport *vport)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX, "2607 Failed to allocate "
+ "init_vpi mailbox\n");
+ return;
+ }
+ lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
+ mboxq->vport = vport;
+ mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
+ rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
+ mempool_free(mboxq, vport->phba->mbox_mem_pool);
+ }
+}
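
The mailbox ownership convention this function follows (inferred from the surrounding code rather than a formal API contract): with MBX_NOWAIT, a return of MBX_NOT_FINISHED means the command was never queued and the issuer frees the mailbox; on any other outcome the completion handler is expected to free it. A sketch with a hypothetical handler name:

	mboxq->mbox_cmpl = my_cmpl;	/* my_cmpl frees mboxq when it runs */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)	/* never queued: issuer frees */
		mempool_free(mboxq, phba->mbox_mem_pool);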
+
+/**
* lpfc_start_fdiscs - send fdiscs for each vports on this port.
* @phba: pointer to lpfc hba data structure.
*
@@ -1734,8 +1887,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
int i;
- LPFC_MBOXQ_t *mboxq;
- int rc;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
@@ -1754,26 +1905,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
continue;
}
if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
- mboxq = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!mboxq) {
- lpfc_printf_vlog(vports[i], KERN_ERR,
- LOG_MBOX, "2607 Failed to allocate "
- "init_vpi mailbox\n");
- continue;
- }
- lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
- mboxq->vport = vports[i];
- mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
- rc = lpfc_sli_issue_mbox(phba, mboxq,
- MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_vlog(vports[i], KERN_ERR,
- LOG_MBOX, "2608 Failed to issue "
- "init_vpi mailbox\n");
- mempool_free(mboxq,
- phba->mbox_mem_pool);
- }
+ lpfc_issue_init_vpi(vports[i]);
continue;
}
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
@@ -1796,6 +1928,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_dmabuf *dmabuf = mboxq->context1;
struct lpfc_vport *vport = mboxq->vport;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (mboxq->u.mb.mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1813,7 +1946,11 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto fail_free_mem;
}
/* The VPI is implicitly registered when the VFI is registered */
+ spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag |= FC_VFI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
lpfc_start_fdiscs(phba);
@@ -2050,8 +2187,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
return;
}
spin_unlock_irq(&phba->hbalock);
- rc = lpfc_sli4_read_fcf_record(phba,
- LPFC_FCOE_FCF_GET_FIRST);
+ rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
goto out;
}
@@ -2139,10 +2275,12 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
+ spin_lock_irq(&phba->hbalock);
if (la->mm)
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+ spin_unlock_irq(&phba->hbalock);
phba->link_events++;
if (la->attType == AT_LINK_UP && (!la->mm)) {
@@ -2271,10 +2409,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->mbxStatus);
break;
}
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(shost->host_lock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(shost->host_lock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
/*
@@ -2332,7 +2470,10 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto out;
}
+ spin_lock_irq(shost->host_lock);
vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
vport->num_disc_nodes = 0;
/* go thru NPR list and issue ELS PLOGIs */
if (vport->fc_npr_cnt)
@@ -3218,6 +3359,34 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 0;
}
+/**
+ * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unregister all the currently registered RPIs
+ * to the HBA.
+ **/
+void
+lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ int i;
+
+ vports = lpfc_create_vport_work_array(phba);
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
+ list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+ if (ndlp->nlp_flag & NLP_RPI_VALID)
+ lpfc_unreg_rpi(vports[i], ndlp);
+ }
+ spin_unlock_irq(shost->host_lock);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
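
The vport work array idiom used above recurs throughout this patch: snapshot the currently active vports, iterate over the snapshot, then release it. A sketch (do_work() is a hypothetical per-vport action; note the NULL check on the snapshot, which the other call sites in this patch perform):

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			do_work(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);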
+
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
@@ -4448,63 +4617,56 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
/**
- * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * lpfc_unregister_fcf_prep - Unregister fcf record preparation
* @phba: Pointer to hba context object.
*
- * This function check if there are any connected remote port for the FCF and
- * if all the devices are disconnected, this function unregister FCFI.
- * This function also tries to use another FCF for discovery.
+ * This function prepares the HBA for unregistering the currently registered
+ * FCF. It unregisters, in order, the RPIs, the VPIs, and the VFI.
*/
-void
-lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+int
+lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mbox;
- int rc;
struct lpfc_vport **vports;
- int i;
-
- spin_lock_irq(&phba->hbalock);
- /*
- * If HBA is not running in FIP mode or
- * If HBA does not support FCoE or
- * If FCF is not registered.
- * do nothing.
- */
- if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
- !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
- (!(phba->hba_flag & HBA_FIP_SUPPORT))) {
- spin_unlock_irq(&phba->hbalock);
- return;
- }
- spin_unlock_irq(&phba->hbalock);
+ struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
+ int i, rc;
+ /* Unregister RPIs */
if (lpfc_fcf_inuse(phba))
- return;
+ lpfc_unreg_hba_rpis(phba);
/* At this point, all discovery is aborted */
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
/* Unregister VPIs */
vports = lpfc_create_vport_work_array(phba);
- if (vports &&
- (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+ if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ /* Stop FLOGI/FDISC retries */
+ ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
+ if (ndlp)
+ lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
lpfc_mbx_unreg_vpi(vports[i]);
- spin_lock_irq(&phba->hbalock);
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
+ /* Cleanup any outstanding ELS commands */
+ lpfc_els_flush_all_cmd(phba);
+
/* Unregister VFI */
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2556 UNREG_VFI mbox allocation failed"
- "HBA state x%x\n",
- phba->pport->port_state);
- return;
+ "2556 UNREG_VFI mbox allocation failed, "
+ "HBA state x%x\n", phba->pport->port_state);
+ return -ENOMEM;
}
lpfc_unreg_vfi(mbox, phba->pport);
@@ -4514,58 +4676,163 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2557 UNREG_VFI issue mbox failed rc x%x "
- "HBA state x%x\n",
- rc, phba->pport->port_state);
+ "2557 UNREG_VFI issue mbox failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
mempool_free(mbox, phba->mbox_mem_pool);
- return;
+ return -EIO;
}
- /* Unregister FCF */
+ shost = lpfc_shost_from_vport(phba->pport);
+ spin_lock_irq(shost->host_lock);
+ phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+
+ return 0;
+}
+
+/**
+ * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
+ * @phba: Pointer to hba context object.
+ *
+ * This function issues a synchronous unregister FCF mailbox command to the
+ * HBA to unregister the currently registered FCF record. It does not reset
+ * the driver FCF usage state flags.
+ *
+ * Return 0 if successfully issued, non-zero otherwise.
+ */
+int
+lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2551 UNREG_FCFI mbox allocation failed"
- "HBA state x%x\n",
- phba->pport->port_state);
- return;
+ "2551 UNREG_FCFI mbox allocation failed, "
+ "HBA state x%x\n", phba->pport->port_state);
+ return -ENOMEM;
}
-
lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2552 UNREG_FCFI issue mbox failed rc x%x "
- "HBA state x%x\n",
- rc, phba->pport->port_state);
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2552 Unregister FCFI command failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
+ * @phba: Pointer to hba context object.
+ *
+ * This function unregisters the currently registered FCF. It also tries
+ * to find another FCF for discovery by rescanning the HBA FCF table.
+ */
+void
+lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
+{
+ int rc;
+
+ /* Preparation for unregistering fcf */
+ rc = lpfc_unregister_fcf_prep(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2748 Failed to prepare for unregistering "
+ "HBA's FCF record: rc=%d\n", rc);
return;
}
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
- FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
- FCF_VALID_VLAN);
- spin_unlock_irq(&phba->hbalock);
+ /* Now, unregister FCF record and reset HBA FCF state */
+ rc = lpfc_sli4_unregister_fcf(phba);
+ if (rc)
+ return;
+ /* Reset HBA FCF states after successful unregister FCF */
+ phba->fcf.fcf_flag = 0;
/*
* If driver is not unloading, check if there is any other
* FCF record that can be used for discovery.
*/
if ((phba->pport->load_flag & FC_UNLOADING) ||
- (phba->link_state < LPFC_LINK_UP))
+ (phba->link_state < LPFC_LINK_UP))
return;
rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2553 lpfc_unregister_unused_fcf failed to read FCF"
- " record HBA state x%x\n",
- phba->pport->port_state);
+ "2553 lpfc_unregister_unused_fcf failed "
+ "to read FCF record HBA state x%x\n",
+ phba->pport->port_state);
+}
+
+/**
+ * lpfc_unregister_fcf - Unregister the currently registered fcf record
+ * @phba: Pointer to hba context object.
+ *
+ * This function just unregisters the currently registered FCF. It does not
+ * try to find another FCF for discovery.
+ */
+void
+lpfc_unregister_fcf(struct lpfc_hba *phba)
+{
+ int rc;
+
+ /* Preparation for unregistering fcf */
+ rc = lpfc_unregister_fcf_prep(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2749 Failed to prepare for unregistering "
+ "HBA's FCF record: rc=%d\n", rc);
+ return;
+ }
+
+ /* Now, unregister FCF record and reset HBA FCF state */
+ rc = lpfc_sli4_unregister_fcf(phba);
+ if (rc)
+ return;
+ /* Set proper HBA FCF states after successful unregister FCF */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * @phba: Pointer to hba context object.
+ *
+ * This function checks whether any remote ports are still connected through
+ * the FCF; if all devices are disconnected, it unregisters the FCFI.
+ * This function also tries to use another FCF for discovery.
+ */
+void
+lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+{
+ /*
+ * If HBA is not running in FIP mode or if HBA does not support
+ * FCoE or if FCF is not registered, do nothing.
+ */
+ spin_lock_irq(&phba->hbalock);
+ if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
+ !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+ !(phba->hba_flag & HBA_FIP_SUPPORT)) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ if (lpfc_fcf_inuse(phba))
+ return;
+
+ lpfc_unregister_fcf_rescan(phba);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index c9faa1d8c3c8..89ff7c09e298 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -1346,6 +1346,9 @@ typedef struct { /* FireFly BIU registers */
#define MBX_HEARTBEAT 0x31
#define MBX_WRITE_VPARMS 0x32
#define MBX_ASYNCEVT_ENABLE 0x33
+#define MBX_READ_EVENT_LOG_STATUS 0x37
+#define MBX_READ_EVENT_LOG 0x38
+#define MBX_WRITE_EVENT_LOG 0x39
#define MBX_PORT_CAPABILITIES 0x3B
#define MBX_PORT_IOV_CONTROL 0x3C
@@ -1465,17 +1468,13 @@ typedef struct { /* FireFly BIU registers */
#define CMD_IOCB_LOGENTRY_CN 0x94
#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
-/* Unhandled Data Security SLI Commands */
-#define DSSCMD_IWRITE64_CR 0xD8
-#define DSSCMD_IWRITE64_CX 0xD9
-#define DSSCMD_IREAD64_CR 0xDA
-#define DSSCMD_IREAD64_CX 0xDB
-#define DSSCMD_INVALIDATE_DEK 0xDC
-#define DSSCMD_SET_KEK 0xDD
-#define DSSCMD_GET_KEK_ID 0xDE
-#define DSSCMD_GEN_XFER 0xDF
-
-#define CMD_MAX_IOCB_CMD 0xE6
+/* Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR 0xF8
+#define DSSCMD_IWRITE64_CX 0xF9
+#define DSSCMD_IREAD64_CR 0xFA
+#define DSSCMD_IREAD64_CX 0xFB
+
+#define CMD_MAX_IOCB_CMD 0xFB
#define CMD_IOCB_MASK 0xff
#define MAX_MSG_DATA 28 /* max msg data in CMD_ADAPTER_MSG
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 8a2a1c5935c6..820015fbc4d6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,35 +52,37 @@ struct dma_address {
uint32_t addr_hi;
};
-#define LPFC_SLIREV_CONF_WORD 0x58
struct lpfc_sli_intf {
uint32_t word0;
-#define lpfc_sli_intf_iftype_MASK 0x00000007
-#define lpfc_sli_intf_iftype_SHIFT 0
-#define lpfc_sli_intf_iftype_WORD word0
-#define lpfc_sli_intf_rev_MASK 0x0000000f
-#define lpfc_sli_intf_rev_SHIFT 4
-#define lpfc_sli_intf_rev_WORD word0
-#define LPFC_SLIREV_CONF_SLI4 4
-#define lpfc_sli_intf_family_MASK 0x000000ff
-#define lpfc_sli_intf_family_SHIFT 8
-#define lpfc_sli_intf_family_WORD word0
-#define lpfc_sli_intf_feat1_MASK 0x000000ff
-#define lpfc_sli_intf_feat1_SHIFT 16
-#define lpfc_sli_intf_feat1_WORD word0
-#define lpfc_sli_intf_feat2_MASK 0x0000001f
-#define lpfc_sli_intf_feat2_SHIFT 24
-#define lpfc_sli_intf_feat2_WORD word0
-#define lpfc_sli_intf_valid_MASK 0x00000007
-#define lpfc_sli_intf_valid_SHIFT 29
-#define lpfc_sli_intf_valid_WORD word0
+#define lpfc_sli_intf_valid_SHIFT 29
+#define lpfc_sli_intf_valid_MASK 0x00000007
+#define lpfc_sli_intf_valid_WORD word0
#define LPFC_SLI_INTF_VALID 6
+#define lpfc_sli_intf_featurelevel2_SHIFT 24
+#define lpfc_sli_intf_featurelevel2_MASK 0x0000001F
+#define lpfc_sli_intf_featurelevel2_WORD word0
+#define lpfc_sli_intf_featurelevel1_SHIFT 16
+#define lpfc_sli_intf_featurelevel1_MASK 0x000000FF
+#define lpfc_sli_intf_featurelevel1_WORD word0
+#define LPFC_SLI_INTF_FEATURELEVEL1_1 1
+#define LPFC_SLI_INTF_FEATURELEVEL1_2 2
+#define lpfc_sli_intf_sli_family_SHIFT 8
+#define lpfc_sli_intf_sli_family_MASK 0x000000FF
+#define lpfc_sli_intf_sli_family_WORD word0
+#define LPFC_SLI_INTF_FAMILY_BE2 0
+#define LPFC_SLI_INTF_FAMILY_BE3 1
+#define lpfc_sli_intf_slirev_SHIFT 4
+#define lpfc_sli_intf_slirev_MASK 0x0000000F
+#define lpfc_sli_intf_slirev_WORD word0
+#define LPFC_SLI_INTF_REV_SLI3 3
+#define LPFC_SLI_INTF_REV_SLI4 4
+#define lpfc_sli_intf_if_type_SHIFT 0
+#define lpfc_sli_intf_if_type_MASK 0x00000007
+#define lpfc_sli_intf_if_type_WORD word0
+#define LPFC_SLI_INTF_IF_TYPE_0 0
+#define LPFC_SLI_INTF_IF_TYPE_1 1
};
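
The SHIFT/MASK/WORD triplets above are consumed by the driver's bf_get()/bf_set() accessor macros (defined elsewhere in this header). A sketch of decoding a freshly read word0, mirroring the probe-time check later in this patch (regaddr stands in for the mapped register address):

	struct lpfc_sli_intf intf;

	intf.word0 = readl(regaddr);
	if (bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID &&
	    bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4) {
		/* SLI-4 capable part: take the SLI-4 probe path */
	}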
-#define LPFC_SLI4_BAR0 1
-#define LPFC_SLI4_BAR1 2
-#define LPFC_SLI4_BAR2 4
-
#define LPFC_SLI4_MBX_EMBED true
#define LPFC_SLI4_MBX_NEMBED false
@@ -161,6 +163,9 @@ struct lpfc_sli_intf {
#define LPFC_FP_DEF_IMAX 10000
#define LPFC_SP_DEF_IMAX 10000
+/* PORT_CAPABILITIES constants. */
+#define LPFC_MAX_SUPPORTED_PAGES 8
+
struct ulp_bde64 {
union ULP_BDE_TUS {
uint32_t w;
@@ -516,7 +521,7 @@ struct lpfc_register {
#define LPFC_UERR_STATUS_LO 0x00A0
#define LPFC_UE_MASK_HI 0x00AC
#define LPFC_UE_MASK_LO 0x00A8
-#define LPFC_SCRATCHPAD 0x0058
+#define LPFC_SLI_INTF 0x0058
/* BAR0 Registers */
#define LPFC_HST_STATE 0x00AC
@@ -576,19 +581,6 @@ struct lpfc_register {
#define LPFC_POST_STAGE_ARMFW_READY 0xC000
#define LPFC_POST_STAGE_ARMFW_UE 0xF000
-#define lpfc_scratchpad_slirev_SHIFT 4
-#define lpfc_scratchpad_slirev_MASK 0xF
-#define lpfc_scratchpad_slirev_WORD word0
-#define lpfc_scratchpad_chiptype_SHIFT 8
-#define lpfc_scratchpad_chiptype_MASK 0xFF
-#define lpfc_scratchpad_chiptype_WORD word0
-#define lpfc_scratchpad_featurelevel1_SHIFT 16
-#define lpfc_scratchpad_featurelevel1_MASK 0xFF
-#define lpfc_scratchpad_featurelevel1_WORD word0
-#define lpfc_scratchpad_featurelevel2_SHIFT 24
-#define lpfc_scratchpad_featurelevel2_MASK 0xFF
-#define lpfc_scratchpad_featurelevel2_WORD word0
-
/* BAR1 Registers */
#define LPFC_IMR_MASK_ALL 0xFFFFFFFF
#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF
@@ -801,6 +793,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
+#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
/* Mailbox command structures */
struct eq_context {
@@ -1149,10 +1142,7 @@ struct sli4_sge { /* SLI-4 */
this flag !! */
#define lpfc_sli4_sge_last_MASK 0x00000001
#define lpfc_sli4_sge_last_WORD word2
- uint32_t word3;
-#define lpfc_sli4_sge_len_SHIFT 0
-#define lpfc_sli4_sge_len_MASK 0x0001FFFF
-#define lpfc_sli4_sge_len_WORD word3
+ uint32_t sge_len;
};
struct fcf_record {
@@ -1301,6 +1291,19 @@ struct lpfc_mbx_del_fcf_tbl_entry {
#define lpfc_mbx_del_fcf_tbl_index_WORD word10
};
+struct lpfc_mbx_redisc_fcf_tbl {
+ struct mbox_header header;
+ uint32_t word10;
+#define lpfc_mbx_redisc_fcf_count_SHIFT 0
+#define lpfc_mbx_redisc_fcf_count_MASK 0x0000FFFF
+#define lpfc_mbx_redisc_fcf_count_WORD word10
+ uint32_t resvd;
+ uint32_t word12;
+#define lpfc_mbx_redisc_fcf_index_SHIFT 0
+#define lpfc_mbx_redisc_fcf_index_MASK 0x0000FFFF
+#define lpfc_mbx_redisc_fcf_index_WORD word12
+};
+
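A sketch of how a rediscover request might be built with this structure, reached through the lpfc_mqe union extended below and filled with the driver's bf_set() macro; the assumption here (not stated in this hunk) is that a count of 0 asks the port to rediscover the whole table:

	struct lpfc_mbx_redisc_fcf_tbl *redisc =
		&mboxq->u.mqe.un.redisc_fcf_tbl;

	bf_set(lpfc_mbx_redisc_fcf_count, redisc, 0);
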
struct lpfc_mbx_query_fw_cfg {
struct mbox_header header;
uint32_t config_number;
@@ -1834,6 +1837,177 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
};
+struct lpfc_mbx_supp_pages {
+ uint32_t word1;
+#define qs_SHIFT 0
+#define qs_MASK 0x00000001
+#define qs_WORD word1
+#define wr_SHIFT 1
+#define wr_MASK 0x00000001
+#define wr_WORD word1
+#define pf_SHIFT 8
+#define pf_MASK 0x000000ff
+#define pf_WORD word1
+#define cpn_SHIFT 16
+#define cpn_MASK 0x000000ff
+#define cpn_WORD word1
+ uint32_t word2;
+#define list_offset_SHIFT 0
+#define list_offset_MASK 0x000000ff
+#define list_offset_WORD word2
+#define next_offset_SHIFT 8
+#define next_offset_MASK 0x000000ff
+#define next_offset_WORD word2
+#define elem_cnt_SHIFT 16
+#define elem_cnt_MASK 0x000000ff
+#define elem_cnt_WORD word2
+ uint32_t word3;
+#define pn_0_SHIFT 24
+#define pn_0_MASK 0x000000ff
+#define pn_0_WORD word3
+#define pn_1_SHIFT 16
+#define pn_1_MASK 0x000000ff
+#define pn_1_WORD word3
+#define pn_2_SHIFT 8
+#define pn_2_MASK 0x000000ff
+#define pn_2_WORD word3
+#define pn_3_SHIFT 0
+#define pn_3_MASK 0x000000ff
+#define pn_3_WORD word3
+ uint32_t word4;
+#define pn_4_SHIFT 24
+#define pn_4_MASK 0x000000ff
+#define pn_4_WORD word4
+#define pn_5_SHIFT 16
+#define pn_5_MASK 0x000000ff
+#define pn_5_WORD word4
+#define pn_6_SHIFT 8
+#define pn_6_MASK 0x000000ff
+#define pn_6_WORD word4
+#define pn_7_SHIFT 0
+#define pn_7_MASK 0x000000ff
+#define pn_7_WORD word4
+ uint32_t rsvd[27];
+#define LPFC_SUPP_PAGES 0
+#define LPFC_BLOCK_GUARD_PROFILES 1
+#define LPFC_SLI4_PARAMETERS 2
+};
+
+struct lpfc_mbx_sli4_params {
+ uint32_t word1;
+#define qs_SHIFT 0
+#define qs_MASK 0x00000001
+#define qs_WORD word1
+#define wr_SHIFT 1
+#define wr_MASK 0x00000001
+#define wr_WORD word1
+#define pf_SHIFT 8
+#define pf_MASK 0x000000ff
+#define pf_WORD word1
+#define cpn_SHIFT 16
+#define cpn_MASK 0x000000ff
+#define cpn_WORD word1
+ uint32_t word2;
+#define if_type_SHIFT 0
+#define if_type_MASK 0x00000007
+#define if_type_WORD word2
+#define sli_rev_SHIFT 4
+#define sli_rev_MASK 0x0000000f
+#define sli_rev_WORD word2
+#define sli_family_SHIFT 8
+#define sli_family_MASK 0x000000ff
+#define sli_family_WORD word2
+#define featurelevel_1_SHIFT 16
+#define featurelevel_1_MASK 0x000000ff
+#define featurelevel_1_WORD word2
+#define featurelevel_2_SHIFT 24
+#define featurelevel_2_MASK 0x0000001f
+#define featurelevel_2_WORD word2
+ uint32_t word3;
+#define fcoe_SHIFT 0
+#define fcoe_MASK 0x00000001
+#define fcoe_WORD word3
+#define fc_SHIFT 1
+#define fc_MASK 0x00000001
+#define fc_WORD word3
+#define nic_SHIFT 2
+#define nic_MASK 0x00000001
+#define nic_WORD word3
+#define iscsi_SHIFT 3
+#define iscsi_MASK 0x00000001
+#define iscsi_WORD word3
+#define rdma_SHIFT 4
+#define rdma_MASK 0x00000001
+#define rdma_WORD word3
+ uint32_t sge_supp_len;
+ uint32_t word5;
+#define if_page_sz_SHIFT 0
+#define if_page_sz_MASK 0x0000ffff
+#define if_page_sz_WORD word5
+#define loopbk_scope_SHIFT 24
+#define loopbk_scope_MASK 0x0000000f
+#define loopbk_scope_WORD word5
+#define rq_db_window_SHIFT 28
+#define rq_db_window_MASK 0x0000000f
+#define rq_db_window_WORD word5
+ uint32_t word6;
+#define eq_pages_SHIFT 0
+#define eq_pages_MASK 0x0000000f
+#define eq_pages_WORD word6
+#define eqe_size_SHIFT 8
+#define eqe_size_MASK 0x000000ff
+#define eqe_size_WORD word6
+ uint32_t word7;
+#define cq_pages_SHIFT 0
+#define cq_pages_MASK 0x0000000f
+#define cq_pages_WORD word7
+#define cqe_size_SHIFT 8
+#define cqe_size_MASK 0x000000ff
+#define cqe_size_WORD word7
+ uint32_t word8;
+#define mq_pages_SHIFT 0
+#define mq_pages_MASK 0x0000000f
+#define mq_pages_WORD word8
+#define mqe_size_SHIFT 8
+#define mqe_size_MASK 0x000000ff
+#define mqe_size_WORD word8
+#define mq_elem_cnt_SHIFT 16
+#define mq_elem_cnt_MASK 0x000000ff
+#define mq_elem_cnt_WORD word8
+ uint32_t word9;
+#define wq_pages_SHIFT 0
+#define wq_pages_MASK 0x0000ffff
+#define wq_pages_WORD word9
+#define wqe_size_SHIFT 8
+#define wqe_size_MASK 0x000000ff
+#define wqe_size_WORD word9
+ uint32_t word10;
+#define rq_pages_SHIFT 0
+#define rq_pages_MASK 0x0000ffff
+#define rq_pages_WORD word10
+#define rqe_size_SHIFT 8
+#define rqe_size_MASK 0x000000ff
+#define rqe_size_WORD word10
+ uint32_t word11;
+#define hdr_pages_SHIFT 0
+#define hdr_pages_MASK 0x0000000f
+#define hdr_pages_WORD word11
+#define hdr_size_SHIFT 8
+#define hdr_size_MASK 0x0000000f
+#define hdr_size_WORD word11
+#define hdr_pp_align_SHIFT 16
+#define hdr_pp_align_MASK 0x0000ffff
+#define hdr_pp_align_WORD word11
+ uint32_t word12;
+#define sgl_pages_SHIFT 0
+#define sgl_pages_MASK 0x0000000f
+#define sgl_pages_WORD word12
+#define sgl_pp_align_SHIFT 16
+#define sgl_pp_align_MASK 0x0000ffff
+#define sgl_pp_align_WORD word12
+ uint32_t rsvd_13_63[51];
+};
+
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -1863,6 +2037,7 @@ struct lpfc_mqe {
struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+ struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
struct lpfc_mbx_reg_fcfi reg_fcfi;
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
@@ -1883,6 +2058,8 @@ struct lpfc_mqe {
struct lpfc_mbx_request_features req_ftrs;
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
+ struct lpfc_mbx_supp_pages supp_pages;
+ struct lpfc_mbx_sli4_params sli4_params;
struct lpfc_mbx_nop nop;
} un;
};
@@ -1959,6 +2136,9 @@ struct lpfc_acqe_link {
#define LPFC_ASYNC_LINK_FAULT_NONE 0x0
#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1
#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2
+#define lpfc_acqe_qos_link_speed_SHIFT 16
+#define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF
+#define lpfc_acqe_qos_link_speed_WORD word1
uint32_t event_tag;
uint32_t trailer;
};
@@ -1976,6 +2156,7 @@ struct lpfc_acqe_fcoe {
#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2
#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3
#define LPFC_FCOE_EVENT_TYPE_CVL 0x4
+#define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5
uint32_t event_tag;
uint32_t trailer;
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b8eb1b6e5e77..d29ac7c317d9 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -544,7 +544,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
mempool_free(pmb, phba->mbox_mem_pool);
return -EIO;
}
- } else {
+ } else if (phba->cfg_suppress_link_up == 0) {
lpfc_init_link(phba, pmb, phba->cfg_topology,
phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -603,6 +603,102 @@ lpfc_config_port_post(struct lpfc_hba *phba)
}
/**
+ * lpfc_hba_init_link - Initialize the FC link
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other routines through the lpfc_hba data
+ * structure for use as a delayed link-up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+int
+lpfc_hba_init_link(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+ mb = &pmb->u.mb;
+ pmb->vport = vport;
+
+ lpfc_init_link(phba, pmb, phba->cfg_topology,
+ phba->cfg_link_speed);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ lpfc_set_loopback_flag(phba);
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0498 Adapter failed to init, mbxCmd x%x "
+ "INIT_LINK, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
+ /* Clear all interrupt enable conditions */
+ writel(0, phba->HCregaddr);
+ readl(phba->HCregaddr); /* flush */
+ /* Clear all pending interrupts */
+ writel(0xffffffff, phba->HAregaddr);
+ readl(phba->HAregaddr); /* flush */
+ phba->link_state = LPFC_HBA_ERROR;
+ if (rc != MBX_BUSY)
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ phba->cfg_suppress_link_up = 0;
+
+ return 0;
+}
+
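As the comment above notes, the routine is reached through the per-HBA jump table populated in lpfc_init_api_table_setup() later in this patch. A sketch of the intended use; the caller shown here is an assumption for illustration, not part of this diff:

	/* end a suppressed-link-up period on user request (hypothetical) */
	if (phba->cfg_suppress_link_up)
		rc = phba->lpfc_hba_init_link(phba);
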
+/**
+ * lpfc_hba_down_link - Bring down the FC link
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine will issue the DOWN_LINK mailbox command call.
+ * It is available to other routines through the lpfc_hba data
+ * structure for use in stopping the link.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+int
+lpfc_hba_down_link(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmb;
+ int rc;
+
+ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmb) {
+ phba->link_state = LPFC_HBA_ERROR;
+ return -ENOMEM;
+ }
+
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "0491 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, pmb);
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "2522 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);
+
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
* lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
* @phba: pointer to lpfc HBA data structure.
*
@@ -2073,6 +2169,44 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
}
/**
+ * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it is running.
+ * The caller of this routine must already hold the hbalock.
+ **/
+void
+__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+ /* Clear pending FCF rediscovery wait timer */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+ /* Now, try to stop the timer */
+ del_timer(&phba->fcf.redisc_wait);
+}
+
+/**
+ * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine stops the SLI4 FCF rediscover wait timer if it is running.
+ * It checks, with the hbalock held, whether the FCF rediscovery wait timer
+ * is pending before disabling the timer and clearing the wait timer
+ * pending flag.
+ **/
+void
+lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
+{
+ spin_lock_irq(&phba->hbalock);
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+ /* FCF rediscovery timer already fired or stopped */
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
* lpfc_stop_hba_timers - Stop all the timers associated with an HBA
* @phba: pointer to lpfc hba data structure.
*
@@ -2096,6 +2230,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
break;
case LPFC_PCI_DEV_OC:
 /* Stop any OneConnect device specific driver timers */
+ lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2228,6 +2363,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
struct lpfc_vport *vport = phba->pport;
struct lpfc_nodelist *ndlp, *next_ndlp;
struct lpfc_vport **vports;
+ struct Scsi_Host *shost;
int i;
if (vport->fc_flag & FC_OFFLINE_MODE)
@@ -2241,11 +2377,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- struct Scsi_Host *shost;
-
if (vports[i]->load_flag & FC_UNLOADING)
continue;
+ shost = lpfc_shost_from_vport(vports[i]);
+ spin_lock_irq(shost->host_lock);
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+ vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+
shost = lpfc_shost_from_vport(vports[i]);
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
@@ -2401,7 +2541,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->this_id = -1;
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
- shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
+ shost->dma_boundary =
+ phba->sli4_hba.pc_sli4_params.sge_supp_len;
shost->sg_tablesize = phba->cfg_sg_seg_cnt;
}
@@ -2650,8 +2791,6 @@ lpfc_stop_port_s4(struct lpfc_hba *phba)
lpfc_stop_hba_timers(phba);
phba->pport->work_port_events = 0;
phba->sli4_hba.intr_enable = 0;
- /* Hard clear it for now, shall have more graceful way to wait later */
- phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}
/**
@@ -2703,7 +2842,7 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
- phba->fcf.fcf_indx);
+ phba->fcf.current_rec.fcf_indx);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -2727,6 +2866,57 @@ lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
}
/**
+ * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
+ * @phba: Pointer to hba for which this call is being executed.
+ *
+ * This routine starts the timer waiting for the FCF rediscovery to complete.
+ **/
+void
+lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
+{
+ unsigned long fcf_redisc_wait_tmo =
+ (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
+ /* Start fcf rediscovery wait period timer */
+ mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
+ spin_lock_irq(&phba->hbalock);
+ /* Allow action to new fcf asynchronous event */
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+ /* Mark the FCF rediscovery pending state */
+ phba->fcf.fcf_flag |= FCF_REDISC_PEND;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
+ * @ptr: unsigned long holding the lpfc_hba data structure pointer.
+ *
+ * This routine is invoked when the wait for FCF table rediscovery times
+ * out. If new FCF record(s) have been discovered during the wait period,
+ * a new FCF event is added to the FCOE async event list and the worker
+ * thread is woken up to process it from the worker thread context.
+ **/
+void
+lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+
+ /* Don't send FCF rediscovery event if timer cancelled */
+ spin_lock_irq(&phba->hbalock);
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ /* Clear FCF rediscovery timer pending flag */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+ /* FCF rediscovery event to worker thread */
+ phba->fcf.fcf_flag |= FCF_REDISC_EVT;
+ spin_unlock_irq(&phba->hbalock);
+ /* wake up worker thread */
+ lpfc_worker_wake_up(phba);
+}
+
+/**
* lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
* @phba: pointer to lpfc hba data structure.
*
@@ -2978,6 +3168,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
bf_get(lpfc_acqe_link_physical, acqe_link);
phba->sli4_hba.link_state.fault =
bf_get(lpfc_acqe_link_fault, acqe_link);
+ phba->sli4_hba.link_state.logical_speed =
+ bf_get(lpfc_acqe_qos_link_speed, acqe_link);
/* Invoke the lpfc_handle_latt mailbox command callback function */
lpfc_mbx_cmpl_read_la(phba, pmb);
@@ -3007,22 +3199,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
uint32_t link_state;
+ int active_vlink_present;
+ struct lpfc_vport **vports;
+ int i;
phba->fc_eventTag = acqe_fcoe->event_tag;
phba->fcoe_eventtag = acqe_fcoe->event_tag;
switch (event_type) {
case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
+ case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
"2546 New FCF found index 0x%x tag 0x%x\n",
acqe_fcoe->index,
acqe_fcoe->event_tag);
- /*
- * If the current FCF is in discovered state, or
- * FCF discovery is in progress do nothing.
- */
spin_lock_irq(&phba->hbalock);
- if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
- (phba->hba_flag & FCF_DISC_INPROGRESS)) {
+ if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
+ (phba->hba_flag & FCF_DISC_INPROGRESS)) {
+ /*
+ * If the current FCF is in discovered state or
+ * FCF discovery is in progress, do nothing.
+ */
+ spin_unlock_irq(&phba->hbalock);
+ break;
+ }
+ if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
+ /*
+ * If fast FCF failover rescan event is pending,
+ * do nothing.
+ */
spin_unlock_irq(&phba->hbalock);
break;
}
@@ -3049,7 +3253,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
" tag 0x%x\n", acqe_fcoe->index,
acqe_fcoe->event_tag);
/* If the event is not for currently used fcf do nothing */
- if (phba->fcf.fcf_indx != acqe_fcoe->index)
+ if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
break;
/*
* Currently, driver support only one FCF - so treat this as
@@ -3074,14 +3278,58 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
if (!ndlp)
break;
shost = lpfc_shost_from_vport(vport);
+ if (phba->pport->port_state <= LPFC_FLOGI)
+ break;
+ /* If virtual link is not yet instantiated ignore CVL */
+ if (vport->port_state <= LPFC_FDISC)
+ break;
+
lpfc_linkdown_port(vport);
- if (vport->port_type != LPFC_NPIV_PORT) {
+ lpfc_cleanup_pending_mbox(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_CVL_RCVD;
+ spin_unlock_irq(shost->host_lock);
+ active_vlink_present = 0;
+
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
+ i++) {
+ if ((!(vports[i]->fc_flag &
+ FC_VPORT_CVL_RCVD)) &&
+ (vports[i]->port_state > LPFC_FDISC)) {
+ active_vlink_present = 1;
+ break;
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+ }
+
+ if (active_vlink_present) {
+ /*
+ * If there are other active VLinks present,
+ * re-instantiate the Vlink using FDISC.
+ */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DELAY_TMO;
spin_unlock_irq(shost->host_lock);
- ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
- vport->port_state = LPFC_FLOGI;
+ ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+ vport->port_state = LPFC_FDISC;
+ } else {
+ /*
+ * Otherwise, request that the port rediscover
+ * the entire FCF table for a fast recovery
+ * from the possible case that the current FCF
+ * is no longer valid.
+ */
+ rc = lpfc_sli4_redisc_fcf_table(phba);
+ if (rc)
+ /*
+ * Last resort is to retry discovery on
+ * the currently registered FCF entry.
+ */
+ lpfc_retry_pport_discovery(phba);
}
break;
default:
@@ -3158,6 +3406,34 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process FCF table
+ * rediscovery pending completion event.
+ **/
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
+{
+ int rc;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Clear FCF rediscovery timeout event */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
+ /* Clear driver fast failover FCF record flag */
+ phba->fcf.failover_rec.flag = 0;
+ /* Set state for FCF fast failover */
+ phba->fcf.fcf_flag |= FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Scan FCF table from the first entry to re-discover SAN */
+ rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+ if (rc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "2747 Post FCF rediscovery read FCF record "
+ "failed 0x%x\n", rc);
+}
+
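Pieced together from this patch, the FCF rediscovery flags move through a small lifecycle; the sketch below is a reading aid, not driver text:

	/*
	 * arm wait timer   -> FCF_REDISC_PEND set
	 * timer fires      -> FCF_REDISC_PEND cleared, FCF_REDISC_EVT set,
	 *                     worker thread woken
	 * worker (above)   -> FCF_REDISC_EVT cleared, FCF_REDISC_FOV set,
	 *                     scan restarted from LPFC_FCOE_FCF_GET_FIRST
	 * scan resolves    -> FCF_REDISC_FOV cleared on match or failover
	 */
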
+/**
* lpfc_api_table_setup - Set up per hba pci-device group func api jump table
* @phba: pointer to lpfc hba data structure.
* @dev_grp: The HBA PCI-Device group number.
@@ -3442,8 +3718,10 @@ static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
- int rc;
- int i, hbq_count;
+ LPFC_MBOXQ_t *mboxq;
+ int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+ uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
+ struct lpfc_mqe *mqe;
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
@@ -3472,6 +3750,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
init_timer(&phba->eratt_poll);
phba->eratt_poll.function = lpfc_poll_eratt;
phba->eratt_poll.data = (unsigned long) phba;
+ /* FCF rediscover timer */
+ init_timer(&phba->fcf.redisc_wait);
+ phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
+ phba->fcf.redisc_wait.data = (unsigned long)phba;
+
/*
* We need to do a READ_CONFIG mailbox command here before
* calling lpfc_get_cfgparam. For VFs this will report the
@@ -3496,31 +3779,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* used to create the sg_dma_buf_pool must be dynamically calculated.
* 2 segments are added since the IOCB needs a command and response bde.
* To insure that the scsi sgl does not cross a 4k page boundary only
- * sgl sizes of 1k, 2k, 4k, and 8k are supported.
- * Table of sgl sizes and seg_cnt:
- * sgl size, sg_seg_cnt total seg
- * 1k 50 52
- * 2k 114 116
- * 4k 242 244
- * 8k 498 500
- * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
- * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
- * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
- * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+ * sgl sizes that are a power of 2 are used.
*/
- if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
- phba->cfg_sg_seg_cnt = 50;
- else if (phba->cfg_sg_seg_cnt <= 114)
- phba->cfg_sg_seg_cnt = 114;
- else if (phba->cfg_sg_seg_cnt <= 242)
- phba->cfg_sg_seg_cnt = 242;
+ buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
+ /* Feature Level 1 hardware is limited to 2 pages */
+ if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_FEATURELEVEL1_1))
+ max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
else
- phba->cfg_sg_seg_cnt = 498;
-
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
- + sizeof(struct fcp_rsp);
- phba->cfg_sg_dma_buf_size +=
- ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+ max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
+ for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
+ dma_buf_size < max_buf_size && buf_size > dma_buf_size;
+ dma_buf_size = dma_buf_size << 1)
+ ;
+ if (dma_buf_size == max_buf_size)
+ phba->cfg_sg_seg_cnt = (dma_buf_size -
+ sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
+ (2 * sizeof(struct sli4_sge))) /
+ sizeof(struct sli4_sge);
+ phba->cfg_sg_dma_buf_size = dma_buf_size;
/* Initialize buffer queue management fields */
hbq_count = lpfc_sli_hbq_count();
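
A worked sizing example, with element sizes taken from the table deleted above (fcp_cmnd = 32, fcp_rsp = 160, sizeof(struct sli4_sge) = 16) and LPFC_SLI4_MIN_BUF_SIZE assumed to be 1024:

	/*
	 * cfg_sg_seg_cnt = 64
	 * buf_size       = 32 + 160 + (64 + 2) * 16 = 1248
	 * dma_buf_size   : 1024 -> 2048 (first power of 2 >= buf_size)
	 *
	 * The pool buffers are therefore 2 KB; cfg_sg_seg_cnt is only
	 * recomputed downward when the loop is clamped at max_buf_size.
	 */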
@@ -3638,6 +3916,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcp_eq_hdl;
}
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq) {
+ rc = -ENOMEM;
+ goto out_free_fcp_eq_hdl;
+ }
+
+ /* Get the Supported Pages. It is always available. */
+ lpfc_supported_pages(mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ goto out_free_fcp_eq_hdl;
+ }
+
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (rc) {
+ rc = -EIO;
+ goto out_free_fcp_eq_hdl;
+ }
return rc;
out_free_fcp_eq_hdl:
@@ -3733,6 +4048,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
+ phba->lpfc_hba_init_link = lpfc_hba_init_link;
+ phba->lpfc_hba_down_link = lpfc_hba_down_link;
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
@@ -4291,7 +4608,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
return NULL;
}
- mutex_init(&phba->ct_event_mutex);
+ spin_lock_init(&phba->ct_ev_lock);
INIT_LIST_HEAD(&phba->ct_ev_waiters);
return phba;
@@ -4641,7 +4958,7 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
- struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
+ struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
int i, port_error = -ENODEV;
if (!phba->sli4_hba.STAregaddr)
@@ -4677,14 +4994,21 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
bf_get(lpfc_hst_state_port_status, &sta_reg));
/* Log device information */
- scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr);
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
- "FeatureL1=0x%x, FeatureL2=0x%x\n",
- bf_get(lpfc_scratchpad_chiptype, &scratchpad),
- bf_get(lpfc_scratchpad_slirev, &scratchpad),
- bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
- bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+ phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
+ if (bf_get(lpfc_sli_intf_valid,
+ &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
+ "FeatureL1=0x%x, FeatureL2=0x%x\n",
+ bf_get(lpfc_sli_intf_sli_family,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_slirev,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_featurelevel1,
+ &phba->sli4_hba.sli_intf),
+ bf_get(lpfc_sli_intf_featurelevel2,
+ &phba->sli4_hba.sli_intf));
+ }
phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
/* With uncoverable error, log the error message and return error */
@@ -4723,8 +5047,8 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
LPFC_UE_MASK_LO;
phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
LPFC_UE_MASK_HI;
- phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
- LPFC_SCRATCHPAD;
+ phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_SLI_INTF;
}
/**
@@ -5999,7 +6323,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
spin_lock_irqsave(&phba->hbalock, flags);
/* Mark the FCFI is no longer registered */
phba->fcf.fcf_flag &=
- ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
+ ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE);
spin_unlock_irqrestore(&phba->hbalock, flags);
}
}
@@ -6039,16 +6363,20 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
* number of bytes required by each mapping. They are actually
- * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device.
+ * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
*/
- phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
- bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
-
- phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
- bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
+ if (pci_resource_start(pdev, 0)) {
+ phba->pci_bar0_map = pci_resource_start(pdev, 0);
+ bar0map_len = pci_resource_len(pdev, 0);
+ } else {
+ phba->pci_bar0_map = pci_resource_start(pdev, 1);
+ bar0map_len = pci_resource_len(pdev, 1);
+ }
+ phba->pci_bar1_map = pci_resource_start(pdev, 2);
+ bar1map_len = pci_resource_len(pdev, 2);
- phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
- bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
+ phba->pci_bar2_map = pci_resource_start(pdev, 4);
+ bar2map_len = pci_resource_len(pdev, 4);
/* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
phba->sli4_hba.conf_regs_memmap_p =
@@ -6793,6 +7121,73 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
phba->pport->work_port_events = 0;
}
+ /**
+ * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion. The expectation is that this routine is called
+ * typically from probe_one or from the online routine.
+ **/
+int
+lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc;
+ struct lpfc_mqe *mqe;
+ struct lpfc_pc_sli4_params *sli4_params;
+ uint32_t mbox_tmo;
+
+ rc = 0;
+ mqe = &mboxq->u.mqe;
+
+ /* Read the port's SLI4 Parameters port capabilities */
+ lpfc_sli4_params(mboxq);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+ }
+
+ if (unlikely(rc))
+ return 1;
+
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
+ sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
+ sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
+ sli4_params->featurelevel_1 = bf_get(featurelevel_1,
+ &mqe->un.sli4_params);
+ sli4_params->featurelevel_2 = bf_get(featurelevel_2,
+ &mqe->un.sli4_params);
+ sli4_params->proto_types = mqe->un.sli4_params.word3;
+ sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
+ sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
+ sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
+ sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
+ sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
+ sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
+ sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
+ sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
+ sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
+ sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
+ sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
+ sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
+ sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
+ sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
+ sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
+ sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
+ sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
+ sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
+ sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
+ sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
+ return rc;
+}
+
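Nearly every line of lpfc_pc_sli4_params_get() goes through the driver's
bf_get()/bf_set() accessors. For reference, they are token-pasting macros over
per-field _SHIFT/_MASK/_WORD companions declared in lpfc_hw4.h; a sketch of
the idea (close to, but not guaranteed identical to, the driver's definition):

/* Each bitfield "name" declares name_SHIFT, name_MASK and name_WORD. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
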
/**
* lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
* @pdev: pointer to PCI device
@@ -7134,6 +7529,12 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ /*
+ * pci_restore_state() now clears the device's saved_state flag, so the
+ * restored state must be saved again.
+ */
+ pci_save_state(pdev);
+
if (pdev->is_busmaster)
pci_set_master(pdev);
@@ -7317,6 +7718,13 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
}
pci_restore_state(pdev);
+
+ /*
+ * pci_restore_state() now clears the device's saved_state flag, so the
+ * restored state must be saved again.
+ */
+ pci_save_state(pdev);
+
if (pdev->is_busmaster)
pci_set_master(pdev);
@@ -7726,6 +8134,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)
/* Restore device state from PCI config space */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+
+ /*
+ * pci_restore_state() now clears the device's saved_state flag, so the
+ * restored state must be saved again.
+ */
+ pci_save_state(pdev);
+
if (pdev->is_busmaster)
pci_set_master(pdev);
@@ -7845,11 +8260,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
int rc;
struct lpfc_sli_intf intf;
- if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
+ if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
return -ENODEV;
if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
- (bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
+ (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
rc = lpfc_pci_probe_one_s4(pdev, pid);
else
rc = lpfc_pci_probe_one_s3(pdev, pid);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index a9afd8b94b6a..6c4dce1a30ca 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1707,7 +1707,8 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
}
/* The sub-header is in DMA memory, which needs endian conversion */
- lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+ if (cfg_shdr)
+ lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
sizeof(union lpfc_sli4_cfg_shdr));
return alloc_len;
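
The guarded call with identical source and destination is an in-place endian
fixup: lpfc_sli_pcimem_bcopy() walks the buffer one 32-bit word at a time,
converting between little-endian wire order and CPU order (a no-op on
little-endian hosts, a swap on big-endian ones such as PPC). A sketch of that
shape:

/* In-place LE <-> CPU conversion of a run of 32-bit words (sketch). */
static void sketch_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp, *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t))
		*dest++ = le32_to_cpu(*src++);
}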
@@ -1747,6 +1748,65 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
/**
+ * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ * @fcf_index: index to fcf table.
+ *
+ * This routine constructs a non-embedded mailbox command for reading the
+ * FCF table entry referred to by @fcf_index in the caller-allocated @mboxq.
+ *
+ * Return: 0 if the mailbox command is constructed successfully, -ENOMEM
+ * otherwise.
+ **/
+int
+lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba,
+ struct lpfcMboxq *mboxq,
+ uint16_t fcf_index)
+{
+ void *virt_addr;
+ dma_addr_t phys_addr;
+ uint8_t *bytep;
+ struct lpfc_mbx_sge sge;
+ uint32_t alloc_len, req_len;
+ struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+ if (!mboxq)
+ return -ENOMEM;
+
+ req_len = sizeof(struct fcf_record) +
+ sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+ /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ "0291 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory. This
+ * routine only uses a single SGE.
+ */
+ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+ phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+ virt_addr = mboxq->sge_array->addr[0];
+ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+ /* Set up command fields */
+ bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+ /* Perform necessary endian conversion */
+ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+ lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+
+ return 0;
+}
+
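Callers allocate the mailbox from the mempool and then let this helper build
the non-embedded command; the reworked lpfc_sli4_read_fcf_record() later in
this patch does exactly that. Roughly (error paths trimmed):

LPFC_MBOXQ_t *mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

if (mboxq && !lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index)) {
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
}
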
+/**
* lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
* @mboxq: pointer to lpfc mbox command.
*
@@ -1946,13 +2006,14 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
- bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
+ bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
+ phba->fcf.current_rec.fcf_indx);
/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
- bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
- (~phba->fcf.addr_mode) & 0x3);
- if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
+ bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
+ if (phba->fcf.current_rec.vlan_id != 0xFFFF) {
bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
- bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
+ bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
+ phba->fcf.current_rec.vlan_id);
}
}
@@ -1992,3 +2053,41 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}
+
+/**
+ * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
+ * mailbox command.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES supported pages mailbox command is issued to
+ * retrieve the particular feature pages supported by the port.
+ **/
+void
+lpfc_supported_pages(struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_supp_pages *supp_pages;
+
+ memset(mbox, 0, sizeof(*mbox));
+ supp_pages = &mbox->u.mqe.un.supp_pages;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+ bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
+}
+
+/**
+ * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
+ * mailbox command.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
+ * retrieve the particular SLI4 features supported by the port.
+ **/
+void
+lpfc_sli4_params(struct lpfcMboxq *mbox)
+{
+ struct lpfc_mbx_sli4_params *sli4_params;
+
+ memset(mbox, 0, sizeof(*mbox));
+ sli4_params = &mbox->u.mqe.un.sli4_params;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
+ bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
+}
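
Both helpers build the same MBX_PORT_CAPABILITIES frame and differ only in the
requested page number (cpn). The intended ordering is: query the supported
pages first, and only issue the SLI4 parameters page when the port advertises
it. A hypothetical condensation of that probe-time flow (page_advertised() is
an assumed stand-in, not a driver function):

lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (!rc && page_advertised(mboxq, LPFC_SLI4_PARAMETERS))
	rc = lpfc_pc_sli4_params_get(phba, mboxq);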
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index d655ed3eebef..f3cfbe2ce986 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2008 Emulex. All rights reserved. *
+ * Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -177,23 +177,3 @@ struct temp_event {
uint32_t data;
};
-/* bsg definitions */
-#define LPFC_BSG_VENDOR_SET_CT_EVENT 1
-#define LPFC_BSG_VENDOR_GET_CT_EVENT 2
-
-struct set_ct_event {
- uint32_t command;
- uint32_t ev_req_id;
- uint32_t ev_reg_id;
-};
-
-struct get_ct_event {
- uint32_t command;
- uint32_t ev_reg_id;
- uint32_t ev_req_id;
-};
-
-struct get_ct_event_reply {
- uint32_t immed_data;
- uint32_t type;
-};
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2ed6af194932..d20ae6b3b3cf 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -62,7 +62,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- struct serv_parm * sp, uint32_t class)
+ struct serv_parm *sp, uint32_t class, int flogi)
{
volatile struct serv_parm *hsp = &vport->fc_sparam;
uint16_t hsp_value, ssp_value = 0;
@@ -75,49 +75,56 @@ lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* correcting the byte values.
*/
if (sp->cls1.classValid) {
- hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
- hsp->cls1.rcvDataSizeLsb;
- ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
- sp->cls1.rcvDataSizeLsb;
- if (!ssp_value)
- goto bad_service_param;
- if (ssp_value > hsp_value) {
- sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
- sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
+ if (!flogi) {
+ hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
+ hsp->cls1.rcvDataSizeLsb);
+ ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
+ sp->cls1.rcvDataSizeLsb);
+ if (!ssp_value)
+ goto bad_service_param;
+ if (ssp_value > hsp_value) {
+ sp->cls1.rcvDataSizeLsb =
+ hsp->cls1.rcvDataSizeLsb;
+ sp->cls1.rcvDataSizeMsb =
+ hsp->cls1.rcvDataSizeMsb;
+ }
}
- } else if (class == CLASS1) {
+ } else if (class == CLASS1)
goto bad_service_param;
- }
-
if (sp->cls2.classValid) {
- hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
- hsp->cls2.rcvDataSizeLsb;
- ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
- sp->cls2.rcvDataSizeLsb;
- if (!ssp_value)
- goto bad_service_param;
- if (ssp_value > hsp_value) {
- sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
- sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
+ if (!flogi) {
+ hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
+ hsp->cls2.rcvDataSizeLsb);
+ ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
+ sp->cls2.rcvDataSizeLsb);
+ if (!ssp_value)
+ goto bad_service_param;
+ if (ssp_value > hsp_value) {
+ sp->cls2.rcvDataSizeLsb =
+ hsp->cls2.rcvDataSizeLsb;
+ sp->cls2.rcvDataSizeMsb =
+ hsp->cls2.rcvDataSizeMsb;
+ }
}
- } else if (class == CLASS2) {
+ } else if (class == CLASS2)
goto bad_service_param;
- }
-
if (sp->cls3.classValid) {
- hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
- hsp->cls3.rcvDataSizeLsb;
- ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
- sp->cls3.rcvDataSizeLsb;
- if (!ssp_value)
- goto bad_service_param;
- if (ssp_value > hsp_value) {
- sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
- sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
+ if (!flogi) {
+ hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
+ hsp->cls3.rcvDataSizeLsb);
+ ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
+ sp->cls3.rcvDataSizeLsb);
+ if (!ssp_value)
+ goto bad_service_param;
+ if (ssp_value > hsp_value) {
+ sp->cls3.rcvDataSizeLsb =
+ hsp->cls3.rcvDataSizeLsb;
+ sp->cls3.rcvDataSizeMsb =
+ hsp->cls3.rcvDataSizeMsb;
+ }
}
- } else if (class == CLASS3) {
+ } else if (class == CLASS3)
goto bad_service_param;
- }
/*
* Preserve the upper four bits of the MSB from the PLOGI response.
@@ -247,7 +254,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
- if (vport->port_state <= LPFC_FLOGI) {
+ if (vport->port_state <= LPFC_FDISC) {
/* Before responding to PLOGI, check for pt2pt mode.
* If we are pt2pt, with an outstanding FLOGI, abort
* the FLOGI and resend it first.
@@ -295,7 +302,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
- if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
+ if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
/* Reject this request because invalid parameters */
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
@@ -831,7 +838,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
"0142 PLOGI RSP: Invalid WWN.\n");
goto out;
}
- if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
+ if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
goto out;
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a246410ce9df..7f21b47db791 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -626,6 +626,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del(&psb->list);
+ psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
spin_unlock_irqrestore(
&phba->sli4_hba.abts_scsi_buf_list_lock,
@@ -688,11 +689,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
list);
if (status) {
/* Put this back on the abort scsi list */
- psb->status = IOSTAT_LOCAL_REJECT;
- psb->result = IOERR_ABORT_REQUESTED;
+ psb->exch_busy = 1;
rc++;
- } else
+ } else {
+ psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
+ }
/* Put it back into the SCSI buffer list */
lpfc_release_scsi_buf_s4(phba, psb);
}
@@ -796,19 +798,17 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
- bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
sgl++;
/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
- bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
/*
* Since the IOCB for the FCP I/O is built into this
@@ -839,11 +839,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
psb->cur_iocbq.sli4_xritag);
if (status) {
/* Put this back on the abort scsi list */
- psb->status = IOSTAT_LOCAL_REJECT;
- psb->result = IOERR_ABORT_REQUESTED;
+ psb->exch_busy = 1;
rc++;
- } else
+ } else {
+ psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
+ }
/* Put it back into the SCSI buffer list */
lpfc_release_scsi_buf_s4(phba, psb);
break;
@@ -857,11 +858,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
list);
if (status) {
/* Put this back on the abort scsi list */
- psb->status = IOSTAT_LOCAL_REJECT;
- psb->result = IOERR_ABORT_REQUESTED;
+ psb->exch_busy = 1;
rc++;
- } else
+ } else {
+ psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
+ }
/* Put it back into the SCSI buffer list */
lpfc_release_scsi_buf_s4(phba, psb);
}
@@ -951,8 +953,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
unsigned long iflag = 0;
- if (psb->status == IOSTAT_LOCAL_REJECT
- && psb->result == IOERR_ABORT_REQUESTED) {
+ if (psb->exch_busy) {
spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
iflag);
psb->pCmd = NULL;
@@ -1869,7 +1870,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
physaddr = sg_dma_address(sgel);
dma_len = sg_dma_len(sgel);
- bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
if ((num_bde + 1) == nseg)
@@ -1878,7 +1878,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
bf_set(lpfc_sli4_sge_last, sgl, 0);
bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ sgl->sge_len = cpu_to_le32(dma_len);
dma_offset += dma_len;
sgl++;
}
@@ -2221,6 +2221,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
+ /* pick up SLI4 exchange busy status from HBA */
+ lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+
if (pnode && NLP_CHK_NODE_ACT(pnode))
atomic_dec(&pnode->cmd_pending);
@@ -2637,6 +2640,7 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
}
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+ phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
return 0;
}
@@ -2695,6 +2699,13 @@ lpfc_info(struct Scsi_Host *host)
" port %s",
phba->Port);
}
+ len = strlen(lpfcinfobuf);
+ if (phba->sli4_hba.link_state.logical_speed) {
+ snprintf(lpfcinfobuf + len,
+ 384-len,
+ " Logical Link Speed: %d Mbps",
+ phba->sli4_hba.link_state.logical_speed * 10);
+ }
}
return lpfcinfobuf;
}
@@ -2990,6 +3001,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (lpfc_is_link_up(phba))
icmd->ulpCommand = CMD_ABORT_XRI_CN;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 65dfc8bd5b49..5932273870a5 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -118,6 +118,7 @@ struct lpfc_scsi_buf {
uint32_t timeout;
+ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 589549b2bf0e..35e3b96d4e07 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -580,10 +580,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
if (sglq) {
- if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
- && ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
- && (iocbq->iocb.un.ulpWord[4]
- == IOERR_ABORT_REQUESTED))) {
+ if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
iflag);
list_add(&sglq->list,
@@ -764,10 +761,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
case DSSCMD_IWRITE64_CX:
case DSSCMD_IREAD64_CR:
case DSSCMD_IREAD64_CX:
- case DSSCMD_INVALIDATE_DEK:
- case DSSCMD_SET_KEK:
- case DSSCMD_GET_KEK_ID:
- case DSSCMD_GEN_XFER:
type = LPFC_SOL_IOCB;
break;
case CMD_ABORT_XRI_CN:
@@ -1717,6 +1710,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_dmabuf *mp;
uint16_t rpi, vpi;
int rc;
+ struct lpfc_vport *vport = pmb->vport;
mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1745,6 +1739,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+ /* Unreg VPI, if the REG_VPI succeed after VLink failure */
+ if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
+ !(phba->pport->load_flag & FC_UNLOADING) &&
+ !pmb->u.mb.mbxStatus) {
+ lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
+ pmb->vport = vport;
+ pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+ if (rc != MBX_NOT_FINISHED)
+ return;
+ }
+
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
@@ -2228,9 +2234,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* All other are passed to the completion callback.
*/
if (pring->ringno == LPFC_ELS_RING) {
- if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (cmdiocbp->iocb_flag &
+ LPFC_DRIVER_ABORTED)) {
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
cmdiocbp->iocb_flag &=
~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
saveq->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
@@ -2240,7 +2252,47 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* of DMAing payload, so don't free data
* buffer till after a hbeat.
*/
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ }
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
+ /* Set cmdiocb flag for the exchange
+ * busy so sgl (xri) will not be
+ * released until the abort xri is
+ * received from hba, clear the
+ * LPFC_DRIVER_ABORTED bit in case
+ * it was driver initiated abort.
+ */
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
+ cmdiocbp->iocb_flag &=
+ ~LPFC_DRIVER_ABORTED;
+ cmdiocbp->iocb_flag |=
+ LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
+ cmdiocbp->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ cmdiocbp->iocb.un.ulpWord[4] =
+ IOERR_ABORT_REQUESTED;
+ /*
+ * For SLI4, irsiocb contains NO_XRI
+ * in sli_xritag, it shall not affect
+ * releasing sgl (xri) process.
+ */
+ saveq->iocb.ulpStatus =
+ IOSTAT_LOCAL_REJECT;
+ saveq->iocb.un.ulpWord[4] =
+ IOERR_SLI_ABORTED;
+ spin_lock_irqsave(&phba->hbalock,
+ iflag);
+ saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ spin_unlock_irqrestore(&phba->hbalock,
+ iflag);
}
}
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -5687,19 +5739,19 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
for (i = 0; i < numBdes; i++) {
/* Should already be byte swapped. */
- sgl->addr_hi = bpl->addrHigh;
- sgl->addr_lo = bpl->addrLow;
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- bde.tus.w = le32_to_cpu(bpl->tus.w);
- bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+ sgl->addr_hi = bpl->addrHigh;
+ sgl->addr_lo = bpl->addrLow;
+
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ bde.tus.w = le32_to_cpu(bpl->tus.w);
+ sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
bpl++;
sgl++;
}
@@ -5712,11 +5764,10 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
- bf_set(lpfc_sli4_sge_len, sgl,
- icmd->un.genreq64.bdl.bdeSize);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->word3 = cpu_to_le32(sgl->word3);
+ sgl->sge_len =
+ cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
}
return sglq->sli4_xritag;
}
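
The recurring word3 edits in this patch all have one shape: assuming the SGE
length now occupies the whole fourth word (named sge_len) of struct sli4_sge,
a single little-endian store replaces the old bitfield set plus whole-word
swap:

/* old: set length bits in CPU order, then swap the word */
bf_set(lpfc_sli4_sge_len, sgl, len);
sgl->word3 = cpu_to_le32(sgl->word3);

/* new: one full-word store, same bytes on the wire */
sgl->sge_len = cpu_to_le32(len);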
@@ -5987,12 +6038,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
else
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- abort_tag = iocbq->iocb.un.acxri.abortIoTag;
wqe->words[5] = 0;
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- wqe->generic.abort_tag = abort_tag;
/*
* The abort handler will send us CMD_ABORT_XRI_CN or
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
@@ -6121,15 +6170,15 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
return IOCB_ERROR;
- if (piocb->iocb_flag & LPFC_IO_FCP) {
+ if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
/*
* For FCP command IOCB, get a new WQ index to distribute
* WQEs across the WQs. On the other hand, an abort IOCB
* must carry the same WQ index as the original command
* IOCB.
*/
- if ((piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN))
+ if (piocb->iocb_flag & LPFC_IO_FCP)
piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
@@ -7004,7 +7053,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb->iocb.ulpContext != abort_context ||
(abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
spin_unlock_irq(&phba->hbalock);
- else {
+ else if (phba->sli_rev < LPFC_SLI_REV4) {
+ /*
+ * leave the SLI4 aborted command on the txcmplq
+ * list and the command complete WCQE's XB bit
+ * will tell whether the SGL (XRI) can be released
+ * immediately or to the aborted SGL list for the
+ * following abort XRI from the HBA.
+ */
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
spin_unlock_irq(&phba->hbalock);
@@ -7013,11 +7069,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* payload, so don't free data buffer till after
* a hbeat.
*/
+ spin_lock_irq(&phba->hbalock);
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
-
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irq(&phba->hbalock);
+
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
- abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
}
}
@@ -7106,7 +7164,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
/* This signals the response to set the correct status
- * before calling the completion handler.
+ * before calling the completion handler
*/
cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
@@ -7124,6 +7182,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+ if (cmdiocb->iocb_flag & LPFC_IO_FCP)
+ abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
@@ -7330,6 +7390,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+ if (iocbq->iocb_flag & LPFC_IO_FCP)
+ abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@@ -8359,11 +8421,24 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
}
}
+/**
+ * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
+ * @phba: pointer to lpfc hba data structure
+ * @pIocbIn: pointer to the rspiocbq
+ * @pIocbOut: pointer to the cmdiocbq
+ * @wcqe: pointer to the completion WCQE
+ *
+ * This routine transfers the fields of a command iocbq to a response iocbq
+ * by copying all the IOCB fields from the command iocbq and transferring
+ * the completion status information from the completion WCQE.
+ **/
static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
+ unsigned long iflags;
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
@@ -8377,8 +8452,17 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
wcqe->total_data_placed;
else
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- else
+ else {
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+ pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
+ }
+
+ /* Pick up HBA exchange busy condition */
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
}
/**
@@ -8419,7 +8503,7 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
}
/* Fake the irspiocbq and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+ lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
return irspiocbq;
}
@@ -8849,8 +8933,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
int ecount = 0;
uint16_t cqid;
- if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
- bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+ if (bf_get(lpfc_eqe_major_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
@@ -8976,7 +9059,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
}
/* Fake the irspiocb and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
/* Pass the cmd_iocb and the rsp state to the upper layer */
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
@@ -9082,8 +9165,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint16_t cqid;
int ecount = 0;
- if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
- unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+ if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
@@ -11871,12 +11953,6 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
{
int rc = 0, error;
LPFC_MBOXQ_t *mboxq;
- void *virt_addr;
- dma_addr_t phys_addr;
- uint8_t *bytep;
- struct lpfc_mbx_sge sge;
- uint32_t alloc_len, req_len;
- struct lpfc_mbx_read_fcf_tbl *read_fcf;
phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -11887,43 +11963,19 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
error = -ENOMEM;
goto fail_fcfscan;
}
-
- req_len = sizeof(struct fcf_record) +
- sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
-
- /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
- alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
- LPFC_SLI4_MBX_NEMBED);
-
- if (alloc_len < req_len) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0291 Allocated DMA memory size (x%x) is "
- "less than the requested DMA memory "
- "size (x%x)\n", alloc_len, req_len);
- error = -ENOMEM;
+ /* Construct the read FCF record mailbox command */
+ rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
+ if (rc) {
+ error = -EINVAL;
goto fail_fcfscan;
}
-
- /* Get the first SGE entry from the non-embedded DMA memory. This
- * routine only uses a single SGE.
- */
- lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
- phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
- virt_addr = mboxq->sge_array->addr[0];
- read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
-
- /* Set up command fields */
- bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
- /* Perform necessary endian conversion */
- bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
- lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+ /* Issue the mailbox command asynchronously */
mboxq->vport = phba->pport;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
+ if (rc == MBX_NOT_FINISHED)
error = -EIO;
- } else {
+ else {
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
@@ -11942,6 +11994,90 @@ fail_fcfscan:
}
/**
+ * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the completion routine for the rediscover FCF table mailbox
+ * command. If the mailbox command returned failure, it will try to stop the
+ * FCF rediscover wait timer.
+ **/
+void
+lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ uint32_t shdr_status, shdr_add_status;
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+
+ shdr_status = bf_get(lpfc_mbox_hdr_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+ &redisc_fcf->header.cfg_shdr.response);
+ if (shdr_status || shdr_add_status) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2746 Requesting for FCF rediscovery failed "
+ "status x%x add_status x%x\n",
+ shdr_status, shdr_add_status);
+ /*
+ * Request failed; as a last resort, retry the currently
+ * registered FCF entry.
+ */
+ lpfc_retry_pport_discovery(phba);
+ } else
+ /*
+ * Start the FCF rediscovery wait timer for the pending FCF
+ * before rescanning the FCF record table.
+ */
+ lpfc_fcf_redisc_wait_start_timer(phba);
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_redisc_fcf_table - Request rediscovery of the entire FCF table.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request rediscovery of the entire FCF table
+ * by the port.
+ **/
+int
+lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
+ int rc, length;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2745 Failed to allocate mbox for "
+ "requesting FCF rediscover.\n");
+ return -ENOMEM;
+ }
+
+ length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
+ /* Set count to 0 for invalidating the entire FCF database */
+ bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
+
+ /* Issue the mailbox command asynchronously */
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
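Mailbox ownership here follows the usual lpfc convention for asynchronous
commands: once lpfc_sli_issue_mbox() accepts the request, freeing belongs to
the completion handler (note the mempool_free() at the end of
lpfc_mbx_cmpl_redisc_fcf_table() above); the issuer frees only when the submit
itself fails. Condensed:

rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
	mempool_free(mbox, phba->mbox_mem_pool);	/* still ours */
	return -EIO;
}
return 0;	/* completion handler frees the mailbox */
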
+/**
* lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
* @phba: pointer to lpfc hba data structure.
*
@@ -12069,3 +12205,48 @@ out:
kfree(rgn23_data);
return;
}
+
+/**
+ * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
+ * @vport: pointer to vport data structure.
+ *
+ * This function iterates through the mailboxq and cleans up all REG_LOGIN
+ * and REG_VPI mailbox commands associated with the vport. This function
+ * is called when the driver wants to restart discovery of the vport due to
+ * a Clear Virtual Link event.
+ **/
+void
+lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mb, *nextmb;
+ struct lpfc_dmabuf *mp;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
+ if (mb->vport != vport)
+ continue;
+
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
+ (mb->u.mb.mbxCommand != MBX_REG_VPI))
+ continue;
+
+ if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ mp = (struct lpfc_dmabuf *) (mb->context1);
+ if (mp) {
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ }
+ list_del(&mb->list);
+ mempool_free(mb, phba->mbox_mem_pool);
+ }
+ mb = phba->sli.mbox_active;
+ if (mb && (mb->vport == vport)) {
+ if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
+ (mb->u.mb.mbxCommand == MBX_REG_VPI))
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
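The _safe iterator is what makes the list_del() inside the loop legal: the
next element (nextmb) is cached before the body runs, so removing and freeing
the current node does not break the walk. The bare pattern, with hypothetical
names:

struct foo *cur, *tmp;

list_for_each_entry_safe(cur, tmp, &head, list) {
	if (should_drop(cur)) {		/* assumed predicate */
		list_del(&cur->list);
		kfree(cur);
	}
}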
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index ba38de3c28f1..dfcf5437d1f5 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -53,17 +53,19 @@ struct lpfc_iocbq {
IOCB_t iocb; /* IOCB cmd */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
- uint8_t iocb_flag;
+ uint16_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
-#define LPFC_FIP_ELS_ID_MASK 0xc0 /* ELS_ID range 0-3 */
-#define LPFC_FIP_ELS_ID_SHIFT 6
+#define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */
+#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
+
+#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
+#define LPFC_FIP_ELS_ID_SHIFT 14
- uint8_t abort_count;
uint8_t rsvd2;
uint32_t drvrTimeout; /* driver timeout in seconds */
uint32_t fcp_wqidx; /* index to FCP work queue */
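
Widening iocb_flag from u8 to u16 is what frees bits 6 and 7 for the two new
flags; the FIP ELS_ID field simply moves to the top two bits of the wider
word, and its mask stays pre-shift (applied before shifting down). Extracting
it is unchanged arithmetic:

/* els_id ends up in the range 0-3 */
els_id = (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) >>
		LPFC_FIP_ELS_ID_SHIFT;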
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 44e5f574236b..86308836600f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -22,6 +22,10 @@
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
#define LPFC_GET_QE_REL_INT 32
#define LPFC_RPI_LOW_WATER_MARK 10
+
+/* Amount of time in milliseconds to wait for FCF rediscovery to complete */
+#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
+
/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT 254
@@ -126,24 +130,36 @@ struct lpfc_sli4_link {
uint8_t status;
uint8_t physical;
uint8_t fault;
+ uint16_t logical_speed;
};
-struct lpfc_fcf {
- uint8_t fabric_name[8];
- uint8_t switch_name[8];
+struct lpfc_fcf_rec {
+ uint8_t fabric_name[8];
+ uint8_t switch_name[8];
uint8_t mac_addr[6];
uint16_t fcf_indx;
+ uint32_t priority;
+ uint16_t vlan_id;
+ uint32_t addr_mode;
+ uint32_t flag;
+#define BOOT_ENABLE 0x01
+#define RECORD_VALID 0x02
+};
+
+struct lpfc_fcf {
uint16_t fcfi;
uint32_t fcf_flag;
#define FCF_AVAILABLE 0x01 /* FCF available for discovery */
#define FCF_REGISTERED 0x02 /* FCF registered with FW */
-#define FCF_DISCOVERED 0x04 /* FCF discovery started */
-#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
-#define FCF_IN_USE 0x10 /* Atleast one discovery completed */
-#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
- uint32_t priority;
+#define FCF_SCAN_DONE 0x04 /* FCF table scan done */
+#define FCF_IN_USE 0x08 /* At least one discovery completed */
+#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */
uint32_t addr_mode;
- uint16_t vlan_id;
+ struct lpfc_fcf_rec current_rec;
+ struct lpfc_fcf_rec failover_rec;
+ struct timer_list redisc_wait;
};
#define LPFC_REGION23_SIGNATURE "RG23"
@@ -248,7 +264,10 @@ struct lpfc_bmbx {
#define SLI4_CT_VFI 2
#define SLI4_CT_FCFI 3
-#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
+#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
+#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0x2000
+#define LPFC_SLI4_MIN_BUF_SIZE 0x400
+#define LPFC_SLI4_MAX_BUF_SIZE 0x20000
/*
* SLI4 specific data structures
@@ -282,6 +301,42 @@ struct lpfc_fcp_eq_hdl {
struct lpfc_hba *phba;
};
+/* Port Capabilities for SLI4 Parameters */
+struct lpfc_pc_sli4_params {
+ uint32_t supported;
+ uint32_t if_type;
+ uint32_t sli_rev;
+ uint32_t sli_family;
+ uint32_t featurelevel_1;
+ uint32_t featurelevel_2;
+ uint32_t proto_types;
+#define LPFC_SLI4_PROTO_FCOE 0x0000001
+#define LPFC_SLI4_PROTO_FC 0x0000002
+#define LPFC_SLI4_PROTO_NIC 0x0000004
+#define LPFC_SLI4_PROTO_ISCSI 0x0000008
+#define LPFC_SLI4_PROTO_RDMA 0x0000010
+ uint32_t sge_supp_len;
+ uint32_t if_page_sz;
+ uint32_t rq_db_window;
+ uint32_t loopbk_scope;
+ uint32_t eq_pages_max;
+ uint32_t eqe_size;
+ uint32_t cq_pages_max;
+ uint32_t cqe_size;
+ uint32_t mq_pages_max;
+ uint32_t mqe_size;
+ uint32_t mq_elem_cnt;
+ uint32_t wq_pages_max;
+ uint32_t wqe_size;
+ uint32_t rq_pages_max;
+ uint32_t rqe_size;
+ uint32_t hdr_pages_max;
+ uint32_t hdr_size;
+ uint32_t hdr_pp_align;
+ uint32_t sgl_pages_max;
+ uint32_t sgl_pp_align;
+};
+
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -295,7 +350,7 @@ struct lpfc_sli4_hba {
void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */
void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */
- void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
+ void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */
/* BAR1 FCoE function CSR register memory map */
void __iomem *STAregaddr; /* Address to HST_STATE register */
void __iomem *ISRregaddr; /* Address to HST_ISR register */
@@ -310,6 +365,8 @@ struct lpfc_sli4_hba {
uint32_t ue_mask_lo;
uint32_t ue_mask_hi;
+ struct lpfc_register sli_intf;
+ struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
uint32_t cfg_eqn;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
@@ -406,6 +463,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
struct lpfc_mbx_sge *);
+int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
+ uint16_t);
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -448,6 +507,7 @@ int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 792f72263f1a..ac276aa46fba 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.7"
+#define LPFC_DRIVER_VERSION "8.3.9"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e3c7fa642306..dc86e873102a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -389,7 +389,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
* by the port.
*/
if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (pport->vpi_state & LPFC_VPI_REGISTERED)) {
+ (pport->fc_flag & FC_VFI_REGISTERED)) {
rc = lpfc_sli4_init_vpi(phba, vpi);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -505,6 +505,7 @@ enable_vport(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if ((phba->link_state < LPFC_LINK_UP) ||
(phba->fc_topology == TOPOLOGY_LOOP)) {
@@ -512,10 +513,10 @@ enable_vport(struct fc_vport *fc_vport)
return VPORT_OK;
}
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(shost->host_lock);
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(shost->host_lock);
/* Use the Physical nodes Fabric NDLP to determine if the link is
* up and ready to FDISC.
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index c24e86f07804..dd808ae942a1 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/dma.h>
-
#include <asm/macints.h>
#include <asm/macintosh.h>
@@ -279,24 +278,27 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
* Programmed IO routines follow.
*/
-static inline int mac_esp_wait_for_fifo(struct esp *esp)
+static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
{
int i = 500000;
do {
- if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
- return 0;
+ unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ if (fbytes)
+ return fbytes;
udelay(2);
} while (--i);
printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
esp_read8(ESP_STATUS));
- return 1;
+ return 0;
}
static inline int mac_esp_wait_for_intr(struct esp *esp)
{
+ struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
int i = 500000;
do {
@@ -308,6 +310,7 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
} while (--i);
printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
+ mep->error = 1;
return 1;
}
@@ -347,11 +350,10 @@ static inline int mac_esp_wait_for_intr(struct esp *esp)
static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
u32 dma_count, int write, u8 cmd)
{
- unsigned long flags;
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
u8 *fifo = esp->regs + ESP_FDATA * 16;
- local_irq_save(flags);
+ disable_irq(esp->host->irq);
cmd &= ~ESP_CMD_DMA;
mep->error = 0;
@@ -359,11 +361,35 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
if (write) {
scsi_esp_cmd(esp, cmd);
- if (!mac_esp_wait_for_intr(esp)) {
- if (mac_esp_wait_for_fifo(esp))
- esp_count = 0;
- } else {
- esp_count = 0;
+ while (1) {
+ unsigned int n;
+
+ n = mac_esp_wait_for_fifo(esp);
+ if (!n)
+ break;
+
+ if (n > esp_count)
+ n = esp_count;
+ esp_count -= n;
+
+ MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+
+ if (!esp_count)
+ break;
+
+ if (mac_esp_wait_for_intr(esp))
+ break;
+
+ if (((esp->sreg & ESP_STAT_PMASK) != ESP_DIP) &&
+ ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP))
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
+ ESP_INTR_BSERV)
+ break;
+
+ scsi_esp_cmd(esp, ESP_CMD_TI);
}
} else {
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -374,47 +400,24 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
scsi_esp_cmd(esp, cmd);
- }
-
- while (esp_count) {
- unsigned int n;
-
- if (mac_esp_wait_for_intr(esp)) {
- mep->error = 1;
- break;
- }
-
- if (esp->sreg & ESP_STAT_SPAM) {
- printk(KERN_ERR PFX "gross error\n");
- mep->error = 1;
- break;
- }
- n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
-
- if (write) {
- if (n > esp_count)
- n = esp_count;
- esp_count -= n;
-
- MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+ while (esp_count) {
+ unsigned int n;
- if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
+ if (mac_esp_wait_for_intr(esp))
break;
- if (esp_count) {
- esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & ESP_INTR_DC)
- break;
+ if (((esp->sreg & ESP_STAT_PMASK) != ESP_DOP) &&
+ ((esp->sreg & ESP_STAT_PMASK) != ESP_MOP))
+ break;
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- } else {
esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & ESP_INTR_DC)
+ if ((esp->ireg & (ESP_INTR_DC | ESP_INTR_BSERV)) !=
+ ESP_INTR_BSERV)
break;
- n = MAC_ESP_FIFO_SIZE - n;
+ n = MAC_ESP_FIFO_SIZE -
+ (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
if (n > esp_count)
n = esp_count;
@@ -429,7 +432,7 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
}
}
- local_irq_restore(flags);
+ enable_irq(esp->host->irq);
}
static int mac_esp_irq_pending(struct esp *esp)
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index d9b8ca5116bc..409648f5845f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_sas.c
- * Version : v00.00.04.12-rc1
+ * Version : v00.00.04.17.1-rc1
*
* Authors:
* (email-id : megaraidlinux@lsi.com)
@@ -843,6 +843,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->lun = scp->device->lun;
pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0;
+ pthru->pad_0 = 0;
pthru->flags = flags;
pthru->data_xfer_len = scsi_bufflen(scp);
@@ -874,6 +875,12 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->sge_count = megasas_make_sgl32(instance, scp,
&pthru->sgl);
+ if (pthru->sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n",
+ pthru->sge_count);
+ return 0;
+ }
+
/*
* Sense info specific
*/
@@ -1000,6 +1007,12 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
} else
ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+ if (ldio->sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+ ldio->sge_count);
+ return 0;
+ }
+
/*
* Sense info specific
*/
@@ -2250,6 +2263,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
dcmd->sgl.sge32[0].phys_addr = ci_h;
@@ -2294,6 +2308,86 @@ megasas_get_pd_list(struct megasas_instance *instance)
return ret;
}
+/*
+ * megasas_get_ld_list - Returns FW's ld_list structure
+ * @instance: Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller LD
+ * list structure. This information is mainly used to find out which
+ * logical drives the FW exports.
+ */
+static int
+megasas_get_ld_list(struct megasas_instance *instance)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_LIST *ci;
+ dma_addr_t ci_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
+ dcmd->opcode = MR_DCMD_LD_GET_LIST;
+ dcmd->sgl.sge32[0].phys_addr = ci_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
+ dcmd->pad_0 = 0;
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ /* update the instance LD ID map from the retrieved LD list */
+
+ if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) {
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+ for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
+ if (ci->ldList[ld_index].state != 0) {
+ ids = ci->ldList[ld_index].ref.targetId;
+ instance->ld_ids[ids] =
+ ci->ldList[ld_index].ref.targetId;
+ }
+ }
+ }
+
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ ci,
+ ci_h);
+
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
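Like the other DCMD helpers in this file, the new routine follows the same
polled command lifecycle. Stripped to its skeleton (payload details elided):

cmd = megasas_get_cmd(instance);		/* grab a frame */
ci = pci_alloc_consistent(instance->pdev, sizeof(*ci), &ci_h);
/* fill cmd->frame->dcmd: opcode, transfer length, one SGE -> ci_h */
ret = megasas_issue_polled(instance, cmd);	/* 0 on success */
/* on success, parse the DMA-coherent reply in ci */
pci_free_consistent(instance->pdev, sizeof(*ci), ci, ci_h);
megasas_return_cmd(instance, cmd);
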
/**
* megasas_get_controller_info - Returns FW's controller structure
* @instance: Adapter soft state
@@ -2339,6 +2433,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = ci_h;
@@ -2590,6 +2685,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
megasas_get_pd_list(instance);
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ megasas_get_ld_list(instance);
+
ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
/*
@@ -2714,6 +2812,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
dcmd->sgl.sge32[0].phys_addr = el_info_h;
@@ -2828,6 +2927,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
dcmd->mbox.w[0] = seq_num;
@@ -3166,6 +3266,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
@@ -3205,6 +3306,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->sge_count = 0;
dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
dcmd->data_xfer_len = 0;
dcmd->opcode = opcode;
@@ -3984,6 +4086,7 @@ megasas_aen_polling(struct work_struct *work)
struct Scsi_Host *host;
struct scsi_device *sdev1;
u16 pd_index = 0;
+ u16 ld_index = 0;
int i, j, doscan = 0;
u32 seq_num;
int error;
@@ -3999,8 +4102,124 @@ megasas_aen_polling(struct work_struct *work)
switch (instance->evt_detail->code) {
case MR_EVT_PD_INSERTED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+
+ if (sdev1)
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
case MR_EVT_PD_REMOVED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (sdev1) {
+ scsi_device_put(sdev1);
+ }
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_LD_DELETED:
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host,
+ i + MEGASAS_MAX_LD_CHANNELS,
+ j,
+ 0);
+
+ if (instance->ld_ids[ld_index] != 0xff) {
+ if (sdev1) {
+ scsi_device_put(sdev1);
+ }
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+ case MR_EVT_LD_CREATED:
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host,
+ i+MEGASAS_MAX_LD_CHANNELS,
+ j, 0);
+
+ if (instance->ld_ids[ld_index] !=
+ 0xff) {
+ if (!sdev1) {
+ scsi_add_device(host,
+ i + 2,
+ j, 0);
+ }
+ }
+ if (sdev1) {
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ doscan = 0;
+ break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
doscan = 1;
break;
default:
@@ -4035,6 +4254,31 @@ megasas_aen_polling(struct work_struct *work)
}
}
}
+
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host,
+ i+MEGASAS_MAX_LD_CHANNELS, j, 0);
+ if (instance->ld_ids[ld_index] != 0xff) {
+ if (!sdev1) {
+ scsi_add_device(host,
+ i + MEGASAS_MAX_LD_CHANNELS,
+ j, 0);
+ } else {
+ scsi_device_put(sdev1);
+ }
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
}
if ( instance->aen_cmd != NULL ) {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 72b28e436e32..9d8b6bf605aa 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.04.12-rc1"
-#define MEGASAS_RELDATE "Sep. 17, 2009"
-#define MEGASAS_EXT_VERSION "Thu Sep. 17 11:41:51 PST 2009"
+#define MEGASAS_VERSION "00.00.04.17.1-rc1"
+#define MEGASAS_RELDATE "Oct. 29, 2009"
+#define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009"
/*
* Device IDs
@@ -117,6 +117,7 @@
#define MFI_CMD_STP 0x08
#define MR_DCMD_CTRL_GET_INFO 0x01010000
+#define MR_DCMD_LD_GET_LIST 0x03010000
#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
#define MR_FLUSH_CTRL_CACHE 0x01
@@ -349,6 +350,32 @@ struct megasas_pd_list {
u8 driveState;
} __packed;
+/*
+ * defines the logical drive reference structure
+ */
+union MR_LD_REF {
+ struct {
+ u8 targetId;
+ u8 reserved;
+ u16 seqNum;
+ };
+ u32 ref;
+} __packed;
+
+/*
+ * defines the logical drive list structure
+ */
+struct MR_LD_LIST {
+ u32 ldCount;
+ u32 reserved;
+ struct {
+ union MR_LD_REF ref;
+ u8 state;
+ u8 reserved[3];
+ u64 size;
+ } ldList[MAX_LOGICAL_DRIVES];
+} __packed;
+
/*
* SAS controller properties
*/
@@ -637,6 +664,8 @@ struct megasas_ctrl_info {
#define MEGASAS_MAX_LD 64
#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
+ MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_DBG_LVL 1
@@ -1187,6 +1216,7 @@ struct megasas_instance {
struct megasas_register_set __iomem *reg_set;
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
+ u8 ld_ids[MEGASAS_MAX_LD_IDS];
s8 init_id;
u16 max_num_sge;
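
The aen-polling hunks above treat ld_ids[i] == 0xff as "no logical drive at
target id i". For orientation, a minimal sketch of how the new
MR_DCMD_LD_GET_LIST opcode and struct MR_LD_LIST would be used to fill that
table; the actual megasas_get_ld_list() body is outside the quoted hunks, so
the helper below is an assumption built only from the structures shown:

	/* 'ci' points at a DMA-coherent MR_LD_LIST returned by firmware for
	 * a MR_DCMD_LD_GET_LIST DCMD, issued like the other DCMDs above. */
	static void sketch_fill_ld_ids(struct megasas_instance *instance,
				       struct MR_LD_LIST *ci)
	{
		u32 i;

		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
		for (i = 0; i < ci->ldCount && i < MAX_LOGICAL_DRIVES; i++) {
			/* a non-zero state means the volume is present */
			if (ci->ldList[i].state != 0)
				instance->ld_ids[ci->ldList[i].ref.targetId] =
					ci->ldList[i].ref.targetId;
		}
	}
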
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 70c4c2467dd8..ba8e128de238 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -44,6 +44,7 @@ config SCSI_MPT2SAS
tristate "LSI MPT Fusion SAS 2.0 Device Driver"
depends on PCI && SCSI
select SCSI_SAS_ATTRS
+ select RAID_ATTRS
---help---
This driver supports PCI-Express SAS 6Gb/s Host Adapters.
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 914168105297..9958d847a88d 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.13
+ * mpi2.h Version: 02.00.14
*
* Version History
* ---------------
@@ -53,6 +53,10 @@
* bytes reserved.
* Added RAID Accelerator functionality.
* 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
* --------------------------------------------------------------------------
*/
@@ -78,7 +82,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x0D)
+#define MPI2_HEADER_VERSION_UNIT (0x0E)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -232,9 +236,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
/*
- * Offset for the Reply Descriptor Post Queue
+ * Defines for the Reply Descriptor Post Queue
*/
#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
/*
* Defines for the HCBSize and address
@@ -497,12 +504,13 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION
#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) /* Target Command Buffer Post Base */
#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) /* Target Command Buffer Post List */
#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) /* RAID Accelerator*/
+/* Host Based Discovery Action */
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
/* Doorbell functions */
#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
-/* #define MPI2_FUNCTION_IO_UNIT_RESET (0x41) */
#define MPI2_FUNCTION_HANDSHAKE (0x42)
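
The new MPI2_REPLY_POST_HOST_INDEX_MASK / MPI2_RPHI_MSIX_INDEX_* defines split
the Reply Descriptor Post Queue host-index register into a 24-bit index and an
8-bit MSI-x vector number. A sketch of the resulting register write, assuming
a mapped Mpi2SystemInterfaceRegs_t at ioc->chip whose member is named
ReplyPostHostIndex:

	writel((host_index & MPI2_REPLY_POST_HOST_INDEX_MASK) |
	       ((u32)msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT),
	       &ioc->chip->ReplyPostHostIndex);
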
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 1611c57a6fdf..cf0ac9f40c97 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.12
+ * mpi2_cnfg.h Version: 02.00.13
*
* Version History
* ---------------
@@ -107,6 +107,8 @@
* to SAS Device Page 0 Flags field.
* Added PhyInfo defines for power condition.
* Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
* --------------------------------------------------------------------------
*/
@@ -712,6 +714,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
/* IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
@@ -2291,6 +2294,26 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
#define MPI2_SASPHY3_PAGEVERSION (0x00)
+/* SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U16 Reserved1; /* 0x08 */
+ U8 Reserved2; /* 0x0A */
+ U8 Flags; /* 0x0B */
+ U8 InitialFrame[28]; /* 0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+ Mpi2SasPhyPage4_t, MPI2_POINTER pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION (0x00)
+
+/* values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
+
+
+
+
/****************************************************************************
* SAS Port Config Pages
****************************************************************************/
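
MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY is a Flags bit in IO Unit page 1;
a driver opting in to host-based discovery would read-modify-write that page.
Sketched with the page-1 config helpers this driver family already exposes
(treat the exact helper names and the little-endian Flags handling as
assumptions):

	Mpi2IOUnitPage1_t iounit_pg1;
	Mpi2ConfigReply_t mpi_reply;

	if (!mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &iounit_pg1)) {
		iounit_pg1.Flags |=
		    cpu_to_le32(MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY);
		mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &iounit_pg1);
	}
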
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
index 65fcaa31cb30..c4adf76b49d9 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
@@ -5,23 +5,24 @@
Copyright (c) 2000-2009 LSI Corporation.
---------------------------------------
- Header Set Release Version: 02.00.12
- Header Set Release Date: 05-06-09
+ Header Set Release Version: 02.00.14
+ Header Set Release Date: 10-28-09
---------------------------------------
Filename Current version Prior version
---------- --------------- -------------
- mpi2.h 02.00.12 02.00.11
- mpi2_cnfg.h 02.00.11 02.00.10
- mpi2_init.h 02.00.07 02.00.06
- mpi2_ioc.h 02.00.11 02.00.10
- mpi2_raid.h 02.00.03 02.00.03
- mpi2_sas.h 02.00.02 02.00.02
+ mpi2.h 02.00.14 02.00.13
+ mpi2_cnfg.h 02.00.13 02.00.12
+ mpi2_init.h 02.00.08 02.00.07
+ mpi2_ioc.h 02.00.13 02.00.12
+ mpi2_raid.h 02.00.04 02.00.04
+ mpi2_sas.h 02.00.03 02.00.02
mpi2_targ.h 02.00.03 02.00.03
- mpi2_tool.h 02.00.03 02.00.02
+ mpi2_tool.h 02.00.04 02.00.04
mpi2_type.h 02.00.00 02.00.00
- mpi2_ra.h 02.00.00
- mpi2_history.txt 02.00.11 02.00.12
+ mpi2_ra.h 02.00.00 02.00.00
+ mpi2_hbd.h 02.00.00
+ mpi2_history.txt 02.00.14 02.00.13
* Date Version Description
@@ -65,6 +66,11 @@ mpi2.h
* MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
* bytes reserved.
* Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
* --------------------------------------------------------------------------
mpi2_cnfg.h
@@ -155,6 +161,15 @@ mpi2_cnfg.h
* Added expander reduced functionality data to SAS
* Expander Page 0.
* Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
* --------------------------------------------------------------------------
mpi2_init.h
@@ -172,6 +187,10 @@ mpi2_init.h
* Query Asynchronous Event.
* Defined two new bits in the SlotStatus field of the SCSI
* Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
* --------------------------------------------------------------------------
mpi2_ioc.h
@@ -246,6 +265,20 @@ mpi2_ioc.h
* Added two new reason codes for SAS Device Status Change
* Event.
* Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
* --------------------------------------------------------------------------
mpi2_raid.h
@@ -256,6 +289,8 @@ mpi2_raid.h
* 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
* the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
* can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
* --------------------------------------------------------------------------
mpi2_sas.h
@@ -264,6 +299,8 @@ mpi2_sas.h
* Control Request.
* 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
* Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
* --------------------------------------------------------------------------
mpi2_targ.h
@@ -283,6 +320,10 @@ mpi2_tool.h
* structures and defines.
* 02-29-08 02.00.02 Modified various names to make them 32-character unique.
* 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
* --------------------------------------------------------------------------
mpi2_type.h
@@ -293,20 +334,26 @@ mpi2_ra.h
* 05-06-09 02.00.00 Initial version.
* --------------------------------------------------------------------------
+mpi2_hbd.h
+ * 10-28-09 02.00.00 Initial version.
+ * --------------------------------------------------------------------------
+
+
mpi2_history.txt Parts list history
-Filename 02.00.12
----------- --------
-mpi2.h 02.00.12
-mpi2_cnfg.h 02.00.11
-mpi2_init.h 02.00.07
-mpi2_ioc.h 02.00.11
-mpi2_raid.h 02.00.03
-mpi2_sas.h 02.00.02
-mpi2_targ.h 02.00.03
-mpi2_tool.h 02.00.03
-mpi2_type.h 02.00.00
-mpi2_ra.h 02.00.00
+Filename 02.00.14 02.00.13 02.00.12
+---------- -------- -------- --------
+mpi2.h 02.00.14 02.00.13 02.00.12
+mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
+mpi2_init.h 02.00.08 02.00.07 02.00.07
+mpi2_ioc.h 02.00.13 02.00.12 02.00.11
+mpi2_raid.h 02.00.04 02.00.04 02.00.03
+mpi2_sas.h 02.00.03 02.00.02 02.00.02
+mpi2_targ.h 02.00.03 02.00.03 02.00.03
+mpi2_tool.h 02.00.04 02.00.04 02.00.03
+mpi2_type.h 02.00.00 02.00.00 02.00.00
+mpi2_ra.h 02.00.00 02.00.00 02.00.00
+mpi2_hbd.h 02.00.00
Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
---------- -------- -------- -------- -------- -------- --------
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 563e56d2e945..6541945e97c3 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.07
+ * mpi2_init.h Version: 02.00.08
*
* Version History
* ---------------
@@ -27,6 +27,10 @@
* Query Asynchronous Event.
* Defined two new bits in the SlotStatus field of the SCSI
* Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
* --------------------------------------------------------------------------
*/
@@ -254,6 +258,11 @@ typedef struct _MPI2_SCSI_IO_REPLY
#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
+
#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
@@ -327,6 +336,7 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
U16 IOCStatus; /* 0x0E */
U32 IOCLogInfo; /* 0x10 */
U32 TerminationCount; /* 0x14 */
+ U32 ResponseInfo; /* 0x18 */
} MPI2_SCSI_TASK_MANAGE_REPLY,
MPI2_POINTER PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
Mpi2SCSITaskManagementReply_t, MPI2_POINTER pMpi2SCSIManagementReply_t;
@@ -339,8 +349,20 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY
#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+/* masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
+
/****************************************************************************
* SCSI Enclosure Processor messages
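
A short sketch of decoding the new ResponseInfo field from a completed
task-management reply, using the masks above (mpi_reply is assumed to point
at a Mpi2SCSITaskManagementReply_t):

	u32 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
	u8 reason_code = (response_info &
	    MPI2_SCSITASKMGMT_RI_MASK_REASONCODE) >>
	    MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE;
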
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index ea51ce868690..754938422f6a 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.12
+ * mpi2_ioc.h Version: 02.00.13
*
* Version History
* ---------------
@@ -87,6 +87,17 @@
* 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
* Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
* Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
* --------------------------------------------------------------------------
*/
@@ -119,8 +130,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST
U16 MsgVersion; /* 0x0C */
U16 HeaderVersion; /* 0x0E */
U32 Reserved5; /* 0x10 */
- U32 Reserved6; /* 0x14 */
- U16 Reserved7; /* 0x18 */
+ U16 Reserved6; /* 0x14 */
+ U8 Reserved7; /* 0x16 */
+ U8 HostMSIxVectors; /* 0x17 */
+ U16 Reserved8; /* 0x18 */
U16 SystemRequestFrameSize; /* 0x1A */
U16 ReplyDescriptorPostQueueDepth; /* 0x1C */
U16 ReplyFreeQueueDepth; /* 0x1E */
@@ -215,7 +228,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
U8 MaxChainDepth; /* 0x14 */
U8 WhoInit; /* 0x15 */
U8 NumberOfPorts; /* 0x16 */
- U8 Reserved2; /* 0x17 */
+ U8 MaxMSIxVectors; /* 0x17 */
U16 RequestCredit; /* 0x18 */
U16 ProductID; /* 0x1A */
U32 IOCCapabilities; /* 0x1C */
@@ -233,7 +246,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY
U8 MaxVolumes; /* 0x37 */
U16 MaxDevHandle; /* 0x38 */
U16 MaxPersistentEntries; /* 0x3A */
- U32 Reserved4; /* 0x3C */
+ U16 MinDevHandle; /* 0x3C */
+ U16 Reserved4; /* 0x3E */
} MPI2_IOC_FACTS_REPLY, MPI2_POINTER PTR_MPI2_IOC_FACTS_REPLY,
Mpi2IOCFactsReply_t, MPI2_POINTER pMpi2IOCFactsReply_t;
@@ -269,6 +283,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
/* ProductID field uses MPI2_FW_HEADER_PID_ */
/* IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
@@ -453,6 +468,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
/* Log Entry Added Event data */
@@ -793,6 +809,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST
MPI2_POINTER pMpi2EventDataSasTopologyChangeList_t;
/* values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
@@ -878,6 +895,44 @@ typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
* */
+/* Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+ U8 Flags; /* 0x00 */
+ U8 NegotiatedLinkRate; /* 0x01 */
+ U8 PhyNum; /* 0x02 */
+ U8 PhysicalPort; /* 0x03 */
+ U32 Reserved1; /* 0x04 */
+ U8 InitialFrame[28]; /* 0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, MPI2_POINTER PTR_MPI2_EVENT_HBD_PHY_SAS,
+ Mpi2EventHbdPhySas_t, MPI2_POINTER pMpi2EventHbdPhySas_t;
+
+/* values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
+
+/* use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h for
+ * the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+ MPI2_EVENT_HBD_PHY_SAS Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, MPI2_POINTER PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+ Mpi2EventHbdDescriptor_t, MPI2_POINTER pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+ U8 DescriptorType; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 Reserved3; /* 0x04 */
+ MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /* 0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, MPI2_POINTER PTR_MPI2_EVENT_DATA_HBD_PHY,
+ Mpi2EventDataHbdPhy_t, MPI2_POINTER pMpi2EventDataHbdPhy_t;
+
+/* values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+
+
/****************************************************************************
* EventAck message
****************************************************************************/
@@ -1126,13 +1181,17 @@ typedef struct _MPI2_FW_IMAGE_HEADER
#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
-#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
-#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
/* SAS */
-#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0010)
-#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0011)
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
/* use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
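
The reworked IOCInit request and IOCFacts reply let the host tell the IOC how
many MSI-x vectors it actually enabled, bounded by what the IOC advertises.
In sketch form (ioc_init and facts are assumed locals holding the two
messages):

	ioc_init.HostMSIxVectors = min_t(u8, enabled_msix_count,
					 facts.MaxMSIxVectors);
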
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 8a42b136cf53..2d8aeed51392 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2.h Version: 02.00.02
+ * mpi2_sas.h Version: 02.00.03
*
* Version History
* ---------------
@@ -18,6 +18,8 @@
* Control Request.
* 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
* Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
* --------------------------------------------------------------------------
*/
@@ -160,7 +162,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
U32 Reserved4; /* 0x14 */
U32 DataLength; /* 0x18 */
U8 CommandFIS[20]; /* 0x1C */
- MPI2_SIMPLE_SGE_UNION SGL; /* 0x20 */
+ MPI2_SGE_IO_UNION SGL; /* 0x20 */
} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 89d02401b9ec..88e6eebc3159 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -107,8 +107,7 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
if (ret)
return ret;
- printk(KERN_INFO "setting logging_level(0x%08x)\n",
- mpt2sas_fwfault_debug);
+ printk(KERN_INFO "setting fwfault_debug(%d)\n", mpt2sas_fwfault_debug);
list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
ioc->fwfault_debug = mpt2sas_fwfault_debug;
return 0;
@@ -1222,6 +1221,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
u32 memap_sz;
u32 pio_sz;
int i, r = 0;
+ u64 pio_chip = 0;
+ u64 chip_phys = 0;
dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
ioc->name, __func__));
@@ -1255,12 +1256,13 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
if (pio_sz)
continue;
- ioc->pio_chip = pci_resource_start(pdev, i);
+ pio_chip = (u64)pci_resource_start(pdev, i);
pio_sz = pci_resource_len(pdev, i);
} else {
if (memap_sz)
continue;
ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
memap_sz = pci_resource_len(pdev, i);
ioc->chip = ioremap(ioc->chip_phys, memap_sz);
if (ioc->chip == NULL) {
@@ -1280,10 +1282,10 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
"IO-APIC enabled"), ioc->pci_irq);
- printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n",
- ioc->name, ioc->chip_phys, ioc->chip, memap_sz);
- printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n",
- ioc->name, ioc->pio_chip, pio_sz);
+ printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+ printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
+ ioc->name, (unsigned long long)pio_chip, pio_sz);
return 0;
@@ -3573,6 +3575,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
init_waitqueue_head(&ioc->reset_wq);
+ ioc->fwfault_debug = mpt2sas_fwfault_debug;
+
/* base internal command bits */
mutex_init(&ioc->base_cmds.mutex);
ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
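
The map_resources hunk above exists because resource_size_t is 32 bits on
some configurations and 64 on others; copying into an explicit u64 and
printing with a fixed %llx width keeps the format string valid everywhere:

	u64 chip_phys = (u64)ioc->chip_phys;	/* resource_size_t -> u64 */
	printk("iomem(0x%016llx)\n", (unsigned long long)chip_phys);
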
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index bb4f14656afa..e18b0544c38f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "03.100.03.00"
-#define MPT2SAS_MAJOR_VERSION 03
+#define MPT2SAS_DRIVER_VERSION "04.100.01.00"
+#define MPT2SAS_MAJOR_VERSION 04
#define MPT2SAS_MINOR_VERSION 100
-#define MPT2SAS_BUILD_VERSION 03
+#define MPT2SAS_BUILD_VERSION 01
#define MPT2SAS_RELEASE_VERSION 00
/*
@@ -323,6 +323,7 @@ struct _sas_device {
* @device_info: bitfield provides detailed info about the hidden components
* @num_pds: number of hidden raid components
* @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
*/
struct _raid_device {
struct list_head list;
@@ -336,6 +337,7 @@ struct _raid_device {
u32 device_info;
u8 num_pds;
u8 responding;
+ u8 percent_complete;
};
/**
@@ -464,7 +466,6 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
* @pdev: pci pdev object
* @chip: memory mapped register space
 * @chip_phys: physical address prior to mapping
- * @pio_chip: I/O mapped register space
* @logging_level: see mpt2sas_debug.h
 * @fwfault_debug: debugging FW timeouts
* @ir_firmware: IR firmware present
@@ -587,8 +588,7 @@ struct MPT2SAS_ADAPTER {
char tmp_string[MPT_STRING_LENGTH];
struct pci_dev *pdev;
Mpi2SystemInterfaceRegs_t __iomem *chip;
- unsigned long chip_phys;
- unsigned long pio_chip;
+ resource_size_t chip_phys;
int logging_level;
int fwfault_debug;
u8 ir_firmware;
@@ -853,6 +853,8 @@ int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage1_t *config_page);
int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
+int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOCPage8_t *config_page);
int mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 594a389c6526..411c27d7f787 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -324,7 +324,9 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
if (r != 0)
goto out;
if (mpi_request->Action ==
- MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) {
+ MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
mem.page_dma);
@@ -882,7 +884,7 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
}
/**
- * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 0
+ * mpt2sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
* @config_page: contents of the config page
@@ -907,7 +909,7 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
mpi_request.Header.PageNumber = 1;
- mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
@@ -922,6 +924,49 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
}
/**
+ * mpt2sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Calling function should call config_get_number_hba_phys prior to
+ * this function, so enough memory is allocated for config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
* mpt2sas_config_get_expander_pg0 - obtain expander page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 84a124f8e21f..fa9bf83819d5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -891,6 +891,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
issue_host_reset:
if (issue_reset) {
+ ret = -ENODATA;
if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function ==
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
@@ -2202,14 +2203,10 @@ _ctl_compat_mpt_command(struct file *file, unsigned cmd, unsigned long arg)
karg.data_out_size = karg32.data_out_size;
karg.max_sense_bytes = karg32.max_sense_bytes;
karg.data_sge_offset = karg32.data_sge_offset;
- memcpy(&karg.reply_frame_buf_ptr, &karg32.reply_frame_buf_ptr,
- sizeof(uint32_t));
- memcpy(&karg.data_in_buf_ptr, &karg32.data_in_buf_ptr,
- sizeof(uint32_t));
- memcpy(&karg.data_out_buf_ptr, &karg32.data_out_buf_ptr,
- sizeof(uint32_t));
- memcpy(&karg.sense_data_ptr, &karg32.sense_data_ptr,
- sizeof(uint32_t));
+ karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+ karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+ karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+ karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
return _ctl_do_mpt_command(ioc, karg, &uarg->mf, state);
}
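
compat_ptr() is the idiomatic way to widen a 32-bit user pointer in a compat
ioctl path: the old memcpy of sizeof(uint32_t) filled only part of the 64-bit
kernel pointer, leaving the remaining bytes stale (and landing in the wrong
half on big-endian machines). The pattern, in isolation:

	#include <linux/compat.h>

	/* karg32.sense_data_ptr is a compat_uptr_t from 32-bit userspace */
	void __user *sense = compat_ptr(karg32.sense_data_ptr);
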
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index efabea1a3ce4..c7ec3f174782 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -52,6 +52,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/raid_class.h>
#include "mpt2sas_base.h"
@@ -133,6 +134,9 @@ struct fw_event_work {
void *event_data;
};
+/* raid transport support */
+static struct raid_template *mpt2sas_raid_template;
+
/**
* struct _scsi_io_transfer - scsi io transfer
* @handle: sas device handle (assigned by firmware)
@@ -1305,7 +1309,6 @@ _scsih_slave_alloc(struct scsi_device *sdev)
struct MPT2SAS_DEVICE *sas_device_priv_data;
struct scsi_target *starget;
struct _raid_device *raid_device;
- struct _sas_device *sas_device;
unsigned long flags;
sas_device_priv_data = kzalloc(sizeof(struct MPT2SAS_DEVICE), GFP_KERNEL);
@@ -1332,21 +1335,8 @@ _scsih_slave_alloc(struct scsi_device *sdev)
if (raid_device)
raid_device->sdev = sdev; /* raid is single lun */
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
- } else {
- /* set TLR bit for SSP devices */
- if (!(ioc->facts.IOCCapabilities &
- MPI2_IOCFACTS_CAPABILITY_TLR))
- goto out;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
- sas_device_priv_data->sas_target->sas_address);
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- if (sas_device && sas_device->device_info &
- MPI2_SAS_DEVICE_INFO_SSP_TARGET)
- sas_device_priv_data->flags |= MPT_DEVICE_TLR_ON;
}
- out:
return 0;
}
@@ -1419,6 +1409,140 @@ _scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
/**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volume_status_flags;
+ u8 percent_complete = 0;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ goto out;
+
+ if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
+ percent_complete = raid_device->percent_complete;
+ out:
+ raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume level
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volstate;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ goto out;
+
+ if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+ state = RAID_STATE_RESYNCING;
+ goto out;
+ }
+
+ switch (vol_pg0.VolumeState) {
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MPI2_RAID_VOL_STATE_FAILED:
+ case MPI2_RAID_VOL_STATE_MISSING:
+ state = RAID_STATE_OFFLINE;
+ break;
+ }
+ out:
+ raid_set_state(mpt2sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @raid_device: raid_device object
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
+{
+ enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+ switch (raid_device->volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ level = RAID_LEVEL_0;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ level = RAID_LEVEL_10;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ level = RAID_LEVEL_1E;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ level = RAID_LEVEL_1;
+ break;
+ }
+
+ raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+/**
* _scsih_get_volume_capabilities - volume capabilities
* @ioc: per adapter object
* @sas_device: the raid_device object
@@ -1479,6 +1603,32 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
}
/**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+ /* only for TAPE */
+ if (sdev->type != TYPE_TAPE)
+ return;
+
+ if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+ return;
+
+ sas_enable_tlr(sdev);
+ sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+ sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+}
+
+/**
* _scsih_slave_configure - device configure routine.
* @sdev: scsi device struct
*
@@ -1574,6 +1724,8 @@ _scsih_slave_configure(struct scsi_device *sdev)
(unsigned long long)raid_device->wwid,
raid_device->num_pds, ds);
_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+ /* raid transport support */
+ _scsih_set_level(sdev, raid_device);
return 0;
}
@@ -1621,8 +1773,10 @@ _scsih_slave_configure(struct scsi_device *sdev)
_scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
- if (ssp_target)
+ if (ssp_target) {
sas_read_port_mode_page(sdev);
+ _scsih_enable_tlr(ioc, sdev);
+ }
return 0;
}
@@ -2908,8 +3062,9 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
} else
mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
-
- if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON))
+ /* Make sure Device is not raid volume */
+ if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
+ sas_is_tlr_enabled(scmd->device))
mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -3298,10 +3453,12 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
if (!sas_device_priv_data->tlr_snoop_check) {
sas_device_priv_data->tlr_snoop_check++;
- if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
- response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
- sas_device_priv_data->flags &=
- ~MPT_DEVICE_TLR_ON;
+ if (!_scsih_is_raid(&scmd->device->sdev_gendev) &&
+ sas_is_tlr_enabled(scmd->device) &&
+ response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
+ sas_disable_tlr(scmd->device);
+ sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
+ }
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
@@ -5170,11 +5327,33 @@ static void
_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
+ Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_ir_operation_status_event_debug(ioc,
- fw_event->event_data);
+ event_data);
#endif
+
+ /* code added for raid transport support */
+ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+ handle = le16_to_cpu(event_data->VolDevHandle);
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ return;
+
+ raid_device->percent_complete = event_data->PercentComplete;
+ }
}
/**
@@ -5998,6 +6177,8 @@ _scsih_remove(struct pci_dev *pdev)
struct _sas_port *mpt2sas_port;
struct _sas_device *sas_device;
struct _sas_node *expander_sibling;
+ struct _raid_device *raid_device, *next;
+ struct MPT2SAS_TARGET *sas_target_priv_data;
struct workqueue_struct *wq;
unsigned long flags;
@@ -6011,6 +6192,21 @@ _scsih_remove(struct pci_dev *pdev)
if (wq)
destroy_workqueue(wq);
+ /* release all the volumes */
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ scsi_remove_target(&raid_device->starget->dev);
+ }
+ printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid"
+ "(0x%016llx)\n", ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
/* free ports attached to the sas_host */
retry_again:
list_for_each_entry(mpt2sas_port,
@@ -6373,6 +6569,13 @@ static struct pci_driver scsih_driver = {
#endif
};
+/* raid transport support */
+static struct raid_function_template mpt2sas_raid_functions = {
+ .cookie = &scsih_driver_template,
+ .is_raid = _scsih_is_raid,
+ .get_resync = _scsih_get_resync,
+ .get_state = _scsih_get_state,
+};
/**
* _scsih_init - main entry point for this driver.
@@ -6392,6 +6595,12 @@ _scsih_init(void)
sas_attach_transport(&mpt2sas_transport_functions);
if (!mpt2sas_transport_template)
return -ENODEV;
+ /* raid transport support */
+ mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions);
+ if (!mpt2sas_raid_template) {
+ sas_release_transport(mpt2sas_transport_template);
+ return -ENODEV;
+ }
mpt2sas_base_initialize_callback_handler();
@@ -6426,8 +6635,11 @@ _scsih_init(void)
mpt2sas_ctl_init();
error = pci_register_driver(&scsih_driver);
- if (error)
+ if (error) {
+ /* raid transport support */
+ raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt2sas_transport_template);
+ }
return error;
}
@@ -6445,7 +6657,8 @@ _scsih_exit(void)
pci_unregister_driver(&scsih_driver);
- sas_release_transport(mpt2sas_transport_template);
+ mpt2sas_ctl_exit();
+
mpt2sas_base_release_callback_handler(scsi_io_cb_idx);
mpt2sas_base_release_callback_handler(tm_cb_idx);
mpt2sas_base_release_callback_handler(base_cb_idx);
@@ -6457,7 +6670,10 @@ _scsih_exit(void)
mpt2sas_base_release_callback_handler(tm_tr_cb_idx);
mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx);
- mpt2sas_ctl_exit();
+ /* raid transport support */
+ raid_class_release(mpt2sas_raid_template);
+ sas_release_transport(mpt2sas_transport_template);
+
}
module_init(_scsih_init);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 3a82872bad44..789f9ee7f001 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -855,6 +855,17 @@ rphy_to_ioc(struct sas_rphy *rphy)
return shost_priv(shost);
}
+static struct _sas_phy *
+_transport_find_local_phy(struct MPT2SAS_ADAPTER *ioc, struct sas_phy *phy)
+{
+ int i;
+
+ for (i = 0; i < ioc->sas_hba.num_phys; i++)
+ if (ioc->sas_hba.phy[i].phy == phy)
+ return &ioc->sas_hba.phy[i];
+ return NULL;
+}
+
/**
* _transport_get_linkerrors -
* @phy: The sas phy object
@@ -870,14 +881,8 @@ _transport_get_linkerrors(struct sas_phy *phy)
struct _sas_phy *mpt2sas_phy;
Mpi2ConfigReply_t mpi_reply;
Mpi2SasPhyPage1_t phy_pg1;
- int i;
- for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
- !mpt2sas_phy; i++) {
- if (ioc->sas_hba.phy[i].phy != phy)
- continue;
- mpt2sas_phy = &ioc->sas_hba.phy[i];
- }
+ mpt2sas_phy = _transport_find_local_phy(ioc, phy);
if (!mpt2sas_phy) /* this phy not on sas_host */
return -EINVAL;
@@ -971,14 +976,8 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
struct _sas_phy *mpt2sas_phy;
Mpi2SasIoUnitControlReply_t mpi_reply;
Mpi2SasIoUnitControlRequest_t mpi_request;
- int i;
- for (i = 0, mpt2sas_phy = NULL; i < ioc->sas_hba.num_phys &&
- !mpt2sas_phy; i++) {
- if (ioc->sas_hba.phy[i].phy != phy)
- continue;
- mpt2sas_phy = &ioc->sas_hba.phy[i];
- }
+ mpt2sas_phy = _transport_find_local_phy(ioc, phy);
if (!mpt2sas_phy) /* this phy not on sas_host */
return -EINVAL;
@@ -1006,6 +1005,173 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
}
/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only support sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ struct _sas_phy *mpt2sas_phy;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int rc = 0;
+
+ mpt2sas_phy = _transport_find_local_phy(ioc, phy);
+
+ if (!mpt2sas_phy) /* this phy not on sas_host */
+ return -EINVAL;
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ if (enable)
+ sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
+ &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_iounit_pg1->PhyData[mpt2sas_phy->phy_id].PhyFlags
+ |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+ out:
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only support sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ struct _sas_phy *mpt2sas_phy;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int i;
+ int rc = 0;
+
+ mpt2sas_phy = _transport_find_local_phy(ioc, phy);
+
+ if (!mpt2sas_phy) /* this phy not on sas_host */
+ return -EINVAL;
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ if (mpt2sas_phy->phy_id != i) {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+ (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+ } else {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (rates->minimum_linkrate +
+ (rates->maximum_linkrate << 4));
+ }
+ }
+
+ if (mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ _transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ mpt2sas_phy->phy_id)) {
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
+
+/**
* _transport_smp_handler - transport portal for smp passthru
* @shost: shost object
* @rphy: sas transport rphy object
@@ -1207,6 +1373,8 @@ struct sas_function_template mpt2sas_transport_functions = {
.get_enclosure_identifier = _transport_get_enclosure_identifier,
.get_bay_identifier = _transport_get_bay_identifier,
.phy_reset = _transport_phy_reset,
+ .phy_enable = _transport_phy_enable,
+ .set_phy_speed = _transport_phy_speed,
.smp_handler = _transport_smp_handler,
};
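
The per-phy loop in _transport_phy_speed() relies on SAS IO unit page 1
packing both programmed rates of a phy into one byte: minimum link rate in
the low nibble, maximum in the high nibble. In isolation:

	/* MaxMinLinkRate: max in bits 7:4, min in bits 3:0 */
	u8 packed = (min_rate & 0xF) | ((max_rate & 0xF) << 4);
	u8 unpacked_min = packed & 0xF;
	u8 unpacked_max = packed >> 4;
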
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c2f1032496cb..f80c1da8f6ca 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -654,7 +654,7 @@ static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
}
chip = &pm8001_chips[ent->driver_data];
SHOST_TO_SAS_HA(shost) =
- kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
+ kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
if (!SHOST_TO_SAS_HA(shost)) {
rc = -ENOMEM;
goto err_out_free_host;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8371d917a9a2..49ac4148493b 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1640,8 +1640,10 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha)
uint16_t mb[MAILBOX_REGISTER_COUNT], i;
int err;
+ spin_unlock_irq(ha->host->host_lock);
err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
+ spin_lock_irq(ha->host->host_lock);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);
@@ -1699,8 +1701,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
return -ENOMEM;
#endif
+ spin_unlock_irq(ha->host->host_lock);
err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
&ha->pdev->dev);
+ spin_lock_irq(ha->host->host_lock);
if (err) {
printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
ql1280_board_tbl[ha->devnum].fwname, err);
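
request_firmware() can sleep (it may wait for userspace to supply the image),
so it must not be called with the host lock held; both hunks above drop and
re-take the lock around the call. The pattern, in isolation:

	spin_unlock_irq(ha->host->host_lock);
	err = request_firmware(&fw, fwname, &ha->pdev->dev);	/* may sleep */
	spin_lock_irq(ha->host->host_lock);
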
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3a9f5b288aee..90d1e062ec4f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -11,7 +11,9 @@
#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);
-
+static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
+static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
@@ -1168,6 +1170,28 @@ qla2x00_total_isp_aborts_show(struct device *dev,
}
static ssize_t
+qla24xx_84xx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t status[2] = {0, 0};
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA84XX(ha) && ha->cs84xx) {
+ if (ha->cs84xx->op_fw_version == 0)
+ rval = qla84xx_verify_chip(vha, status);
+
+ if ((rval == QLA_SUCCESS) && (status[0] == 0))
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ (uint32_t)ha->cs84xx->op_fw_version);
+ }
+
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1281,6 +1305,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
NULL);
+static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
+ NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@@ -1310,6 +1336,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_optrom_efi_version,
&dev_attr_optrom_fcode_version,
&dev_attr_optrom_fw_version,
+ &dev_attr_84xx_fw_version,
&dev_attr_total_isp_aborts,
&dev_attr_mpi_version,
&dev_attr_phy_version,
@@ -1504,8 +1531,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
-
- qla2x00_abort_fcport_cmds(fcport);
}
static int
@@ -1795,6 +1820,581 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
+/* BSG support for ELS/CT pass through */
+inline srb_t *
+qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_bsg_ctx *ctx;
+
+ sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
+ if (!sp)
+ goto done;
+ ctx = kzalloc(size, GFP_KERNEL);
+ if (!ctx) {
+ mempool_free(sp, ha->srb_mempool);
+ goto done;
+ }
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->ctx = ctx;
+done:
+ return sp;
+}
+
+static int
+qla2x00_process_els(struct fc_bsg_job *bsg_job)
+{
+ struct fc_rport *rport;
+ fc_port_t *fcport;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+ const char *type;
+ int req_sg_cnt, rsp_sg_cnt;
+ int rval = (DRIVER_ERROR << 16);
+ uint16_t nextlid = 0;
+ struct srb_bsg *els;
+
+ /* Multiple SG's are not supported for ELS requests */
+ if (bsg_job->request_payload.sg_cnt > 1 ||
+ bsg_job->reply_payload.sg_cnt > 1) {
+ DEBUG2(printk(KERN_INFO
+ "multiple SG's are not supported for ELS requests"
+ " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt));
+ rval = -EPERM;
+ goto done;
+ }
+
+ /* ELS request for rport */
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_RPT_ELS";
+
+ /* make sure the rport is logged in,
+ * if not, perform fabric login
+ */
+ if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "failed to login port %06X for ELS passthru\n",
+ fcport->d_id.b24));
+ rval = -EIO;
+ goto done;
+ }
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_HST_ELS_NOLOGIN";
+
+ /* Allocate a dummy fcport structure, since functions
+ * preparing the IOCB and mailbox command retrieve port-
+ * specific information from the fcport structure. For host-based
+ * ELS commands there will be no fcport structure allocated.
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa =
+ bsg_job->request->rqst_data.h_els.port_id[0];
+ fcport->d_id.b.area =
+ bsg_job->request->rqst_data.h_els.port_id[1];
+ fcport->d_id.b.domain =
+ bsg_job->request->rqst_data.h_els.port_id[2];
+ fcport->loop_id =
+ (fcport->d_id.b.al_pa == 0xFD) ?
+ NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ els = sp->ctx;
+ els->ctx.type =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ els->bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ bsg_job->request->rqst_data.h_els.command_code,
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+ return rval;
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+done_free_fcport:
+ if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
+ kfree(fcport);
+done:
+ return rval;
+}
+
+static int
+qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+{
+ srb_t *sp;
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ int req_sg_cnt, rsp_sg_cnt;
+ uint16_t loop_id;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_CT";
+ struct srb_bsg *ct;
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld):Firmware is not capable to support FC "
+ "CT pass thru\n", vha->host_no));
+ rval = -EPERM;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+
+ loop_id =
+ (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ >> 24;
+ switch (loop_id) {
+ case 0xFC:
+ loop_id = NPH_SNS;
+ break;
+ case 0xFA:
+ loop_id = vha->mgmt_svr_loop_id;
+ break;
+ default:
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Unknown loop id: %x\n", loop_id));
+ rval = -EINVAL;
+ goto done_unmap_sg;
+ }
+
+ /* Allocate a dummy fcport structure, since functions preparing the
+ * IOCB and mailbox command retrieve port specific information
+ * from the fcport structure. For host based CT commands there is
+ * no fcport structure allocated.
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->loop_id = loop_id;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ ct = sp->ctx;
+ ct->ctx.type = SRB_CT_CMD;
+ ct->bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+ return rval;
+}
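The loop-id selection above keys off the top byte of the CT preamble:
0xFC addresses the directory (name) server and 0xFA the fabric
management server. A condensed sketch of that mapping, assuming the
NPH_SNS and mgmt_svr_loop_id definitions the patch relies on:

    /* Sketch: derive a destination loop handle from a CT preamble word. */
    static int
    ct_preamble_to_loop_id(scsi_qla_host_t *vha, uint32_t word1,
        uint16_t *loop_id)
    {
        switch ((word1 & 0xFF000000) >> 24) {
        case 0xFC:              /* directory (name) server */
            *loop_id = NPH_SNS;
            return 0;
        case 0xFA:              /* fabric management server */
            *loop_id = vha->mgmt_svr_loop_id;
            return 0;
        default:
            return -EINVAL;
        }
    }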
+
+static int
+qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint8_t command_sent = 0;
+ uint32_t vendor_cmd;
+ char *type = "FC_BSG_HST_VENDOR";
+ struct msg_echo_lb elreq;
+ uint16_t response[MAILBOX_REGISTER_COUNT];
+ uint8_t* fw_sts_ptr;
+ uint8_t *req_data = NULL;
+ dma_addr_t req_data_dma;
+ uint32_t req_data_len;
+ uint8_t *rsp_data = NULL;
+ dma_addr_t rsp_data_dma;
+ uint32_t rsp_data_len;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ rval = -EBUSY;
+ goto done;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done;
+ }
+
+ elreq.req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!elreq.req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+ elreq.rsp_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!elreq.rsp_sg_cnt) {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts "
+ "[request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+ req_data_len = bsg_job->request_payload.payload_len;
+ rsp_data_len = bsg_job->reply_payload.payload_len;
+ req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+ &req_data_dma, GFP_KERNEL);
+ if (!req_data) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+ &rsp_data_dma, GFP_KERNEL);
+ if (!rsp_data) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ /* Copy the request buffer in req_data now */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data,
+ req_data_len);
+
+ elreq.send_dma = req_data_dma;
+ elreq.rcv_dma = rsp_data_dma;
+ elreq.transfer_size = req_data_len;
+
+ /* Vendor cmd : loopback or ECHO diagnostic
+ * Options:
+ * Loopback : Either internal or external loopback
+ * ECHO: ECHO ELS or Vendor specific FC4 link data
+ */
+ vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
+ elreq.options =
+ *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
+ + 1);
+
+ switch (vendor_cmd) {
+ case QL_VND_LOOPBACK:
+ if (ha->current_topology != ISP_CFG_F) {
+ type = "FC_BSG_HST_VENDOR_LOOPBACK";
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
+ vha->host_no, type, vendor_cmd, elreq.options));
+
+ command_sent = INT_DEF_LB_LOOPBACK_CMD;
+ rval = qla2x00_loopback_test(vha, &elreq, response);
+ if (IS_QLA81XX(ha)) {
+ if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
+ "ISP\n", __func__, vha->host_no));
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ } else {
+ type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
+ vha->host_no, type, vendor_cmd, elreq.options));
+
+ command_sent = INT_DEF_LB_ECHO_CMD;
+ rval = qla2x00_echo_test(vha, &elreq, response);
+ }
+ break;
+ case QLA84_RESET:
+ if (!IS_QLA84XX(vha->hw)) {
+ rval = -EINVAL;
+ DEBUG16(printk(
+ "%s(%ld): 8xxx exiting.\n",
+ __func__, vha->host_no));
+ goto done_unmap_sg;
+ }
+ rval = qla84xx_reset(vha, &elreq, bsg_job);
+ break;
+ case QLA84_MGMT_CMD:
+ if (!IS_QLA84XX(vha->hw)) {
+ rval = -EINVAL;
+ DEBUG16(printk(
+ "%s(%ld): 8xxx exiting.\n",
+ __func__, vha->host_no));
+ goto done_unmap_sg;
+ }
+ rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
+ break;
+ default:
+ rval = -ENOSYS;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
+ rval = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
+ rval = bsg_job->reply->result = 0;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
+ bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, rsp_data,
+ rsp_data_len);
+ }
+ bsg_job->job_done(bsg_job);
+
+done_unmap_sg:
+
+ if (req_data)
+ dma_free_coherent(&ha->pdev->dev, req_data_len,
+ req_data, req_data_dma);
+ if (rsp_data)
+ dma_free_coherent(&ha->pdev->dev, rsp_data_len,
+ rsp_data, rsp_data_dma);
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+done:
+ return rval;
+}
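For vendor requests the driver reuses the BSG job's sense buffer as a
status channel: a struct fc_bsg_reply, then the raw mailbox response
array, then a single byte recording which diagnostic was issued. A
sketch of how a reader would unpack that layout (the helper and struct
are illustrative, not part of the driver):

    struct qla_vnd_status {
        uint16_t mb[MAILBOX_REGISTER_COUNT];    /* mailbox response */
        uint8_t command_sent;                   /* INT_DEF_LB_* code */
    };

    static void
    unpack_vnd_status(struct fc_bsg_job *job, struct qla_vnd_status *out)
    {
        uint8_t *p = (uint8_t *)job->req->sense +
            sizeof(struct fc_bsg_reply);

        memcpy(out->mb, p, sizeof(out->mb));
        out->command_sent = p[sizeof(out->mb)];
    }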
+
+static int
+qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+{
+ int ret = -EINVAL;
+
+ switch (bsg_job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ ret = qla2x00_process_els(bsg_job);
+ break;
+ case FC_BSG_HST_CT:
+ ret = qla2x00_process_ct(bsg_job);
+ break;
+ case FC_BSG_HST_VENDOR:
+ ret = qla2x00_process_vendor_specific(bsg_job);
+ break;
+ case FC_BSG_HST_ADD_RPORT:
+ case FC_BSG_HST_DEL_RPORT:
+ case FC_BSG_RPT_CT:
+ default:
+ DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+ break;
+ }
+ return ret;
+}
+
+static int
+qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int cnt, que;
+ unsigned long flags;
+ struct req_que *req;
+ struct srb_bsg *sp_bsg;
+
+ /* find the bsg job from the active list of commands */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+
+ if (sp) {
+ sp_bsg = (struct srb_bsg *)sp->ctx;
+
+ if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
+ (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
+ (sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
+ (sp_bsg->bsg_job == bsg_job)) {
+ if (ha->isp_ops->abort_command(sp)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort_command failed\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = -EIO;
+ } else {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort_command success\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = 0;
+ }
+ goto done;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) SRB not found to abort\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ return 0;
+
+done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (bsg_job->request->msgcode == FC_BSG_HST_CT)
+ kfree(sp->fcport);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ return 0;
+}
+
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@@ -1838,6 +2438,8 @@ struct fc_function_template qla2xxx_transport_functions = {
.vport_create = qla24xx_vport_create,
.vport_disable = qla24xx_vport_disable,
.vport_delete = qla24xx_vport_delete,
+ .bsg_request = qla24xx_bsg_request,
+ .bsg_timeout = qla24xx_bsg_timeout,
};
struct fc_function_template qla2xxx_transport_vport_functions = {
@@ -1878,6 +2480,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
.terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .bsg_request = qla24xx_bsg_request,
+ .bsg_timeout = qla24xx_bsg_timeout,
};
void
@@ -1906,3 +2510,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
}
+static int
+qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
+{
+ int ret = 0;
+ int cmd;
+ uint16_t cmd_status;
+
+ DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+
+ cmd = (*(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2) ==
+ A84_RESET_FLAG_ENABLE_DIAG_FW) ?
+ A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
+ ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
+ &cmd_status);
+ return ret;
+}
+
+static int
+qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
+{
+ struct access_chip_84xx *mn;
+ dma_addr_t mn_dma, mgmt_dma;
+ void *mgmt_b = NULL;
+ int ret = 0;
+ int rsp_hdr_len, len = 0;
+ struct qla84_msg_mgmt *ql84_mgmt;
+
+ ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
+ if (!ql84_mgmt)
+ return -ENOMEM;
+ ql84_mgmt->cmd =
+ *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
+ ql84_mgmt->mgmtp.u.mem.start_addr =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
+ ql84_mgmt->len =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
+ ql84_mgmt->mgmtp.u.config.id =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
+ ql84_mgmt->mgmtp.u.config.param0 =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
+ ql84_mgmt->mgmtp.u.config.param1 =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
+ ql84_mgmt->mgmtp.u.info.type =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
+ ql84_mgmt->mgmtp.u.info.context =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
+
+ rsp_hdr_len = bsg_job->request_payload.payload_len;
+
+ mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (mn == NULL) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed %lu\n", __func__, ha->host_no));
+ vfree(ql84_mgmt);
+ return -ENOMEM;
+ }
+
+ memset(mn, 0, sizeof (struct access_chip_84xx));
+
+ mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ switch (ql84_mgmt->cmd) {
+ case QLA84_MGMT_READ_MEM:
+ mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
+ break;
+ case QLA84_MGMT_WRITE_MEM:
+ mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
+ break;
+ case QLA84_MGMT_CHNG_CONFIG:
+ mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
+ mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
+ mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
+ break;
+ case QLA84_MGMT_GET_INFO:
+ mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
+ mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
+ break;
+ default:
+ ret = -EIO;
+ goto exit_mgmt0;
+ }
+
+ if ((len == ql84_mgmt->len) &&
+ ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
+ mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
+ &mgmt_dma, GFP_KERNEL);
+ if (mgmt_b == NULL) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
+ "failed %lu\n", __func__, ha->host_no));
+ ret = -ENOMEM;
+ goto exit_mgmt0;
+ }
+ mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
+ mn->dseg_count = cpu_to_le16(1);
+ mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+ mn->dseg_length = cpu_to_le32(len);
+
+ if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
+ memcpy(mgmt_b, ql84_mgmt->payload, len);
+ }
+ }
+
+ ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
+ if (ret != QLA_SUCCESS)
+ DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
+ __func__, ha->host_no));
+
+ if (mgmt_b)
+ dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
+
+exit_mgmt0:
+ dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
+ vfree(ql84_mgmt);
+ return ret;
+}
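qla84xx_mgmt_cmd() treats the BSG vendor_cmd area as an array of 32-bit
words: word 2 carries the management sub-command (low 16 bits), word 3
the memory start address, word 4 the payload length, words 5-7 the
config id and parameters, and words 8-9 the info type and context. The
same layout written as a struct, purely for illustration (the driver
indexes the words directly):

    struct qla84_mgmt_req_words {
        uint32_t vendor_op;     /* word 0: QLA84_MGMT_CMD */
        uint32_t options;       /* word 1: as read by the caller */
        uint32_t cmd;           /* word 2: low 16 bits = QLA84_MGMT_* */
        uint32_t start_addr;    /* word 3: memory start address */
        uint32_t len;           /* word 4: payload byte count */
        uint32_t config_id;     /* word 5 */
        uint32_t param0;        /* word 6 */
        uint32_t param1;        /* word 7 */
        uint32_t info_type;     /* word 8 */
        uint32_t info_context;  /* word 9 */
    };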
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1263d9796e89..afa95614aaf8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -31,6 +31,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
#define QLA2XXX_DRIVER_NAME "qla2xxx"
@@ -228,6 +229,27 @@ struct srb_logio {
uint16_t flags;
};
+struct srb_bsg_ctx {
+#define SRB_ELS_CMD_RPT 3
+#define SRB_ELS_CMD_HST 4
+#define SRB_CT_CMD 5
+ uint16_t type;
+};
+
+struct srb_bsg {
+ struct srb_bsg_ctx ctx;
+ struct fc_bsg_job *bsg_job;
+};
+
+struct msg_echo_lb {
+ dma_addr_t send_dma;
+ dma_addr_t rcv_dma;
+ uint16_t req_sg_cnt;
+ uint16_t rsp_sg_cnt;
+ uint16_t options;
+ uint32_t transfer_size;
+};
+
/*
* ISP I/O Register Set structure definitions.
*/
@@ -522,6 +544,8 @@ typedef struct {
#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
+/* ISP mailbox loopback echo diagnostic error code */
+#define MBS_LB_RESET 0x17
/*
* Firmware options 1, 2, 3.
*/
@@ -2230,6 +2254,13 @@ struct req_que {
int max_q_depth;
};
+/* Place holder for FW buffer parameters */
+struct qlfc_fw {
+ void *fw_buf;
+ dma_addr_t fw_dma;
+ uint32_t len;
+};
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -2594,6 +2625,7 @@ struct qla_hw_data {
struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
+ struct qlfc_fw fw_buf;
};
/*
@@ -2766,4 +2798,127 @@ typedef struct scsi_qla_host {
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+/*
+ * BSG Vendor specific commands
+ */
+
+#define QL_VND_LOOPBACK 0x01
+#define QLA84_RESET 0x02
+#define QLA84_UPDATE_FW 0x03
+#define QLA84_MGMT_CMD 0x04
+
+/* BSG definitions for interpreting CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD 0
+#define INT_DEF_LB_ECHO_CMD 1
+
+/* BSG Vendor specific definitions */
+typedef struct _A84_RESET {
+ uint16_t Flags;
+ uint16_t Reserved;
+#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
+} __attribute__((packed)) A84_RESET, *PA84_RESET;
+
+#define A84_ISSUE_WRITE_TYPE_CMD 0
+#define A84_ISSUE_READ_TYPE_CMD 1
+#define A84_CLEANUP_CMD 2
+#define A84_ISSUE_RESET_OP_FW 3
+#define A84_ISSUE_RESET_DIAG_FW 4
+#define A84_ISSUE_UPDATE_OPFW_CMD 5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
+
+struct qla84_mgmt_param {
+ union {
+ struct {
+ uint32_t start_addr;
+ } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+ struct {
+ uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF 1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
+#define QLA84_MGMT_CONFIG_ID_PAUSE 3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
+
+ uint32_t param0;
+ uint32_t param1;
+ } config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+ struct {
+ uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
+#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
+
+ uint32_t context;
+/*
+ * context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+ */
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
+
+/*
+ * context definitions for QLA84_MGMT_INFO_PORT_STAT
+ */
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
+
+
+/*
+ * context definitions for QLA84_MGMT_INFO_LIF_STAT
+ */
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
+
+ } info; /* for QLA84_MGMT_GET_INFO */
+ } u;
+};
+
+struct qla84_msg_mgmt {
+ uint16_t cmd;
+#define QLA84_MGMT_READ_MEM 0x00
+#define QLA84_MGMT_WRITE_MEM 0x01
+#define QLA84_MGMT_CHNG_CONFIG 0x02
+#define QLA84_MGMT_GET_INFO 0x03
+ uint16_t rsrvd;
+ struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+ uint32_t len; /* bytes in payload following this struct */
+ uint8_t payload[0]; /* payload for cmd */
+};
+
+struct msg_update_fw {
+ /*
+ * diag_fw = 0 operational fw
+ * otherwise diagnostic fw
+ * offset, len, fw_len are present to overcome the current limitation
+ * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
+ * specifies the byte "offset" where it fits in the fw buffer. The
+ * number of bytes in each chunk is specified in "len". "fw_len"
+ * is the total size of fw. The first chunk should start at offset = 0.
+ * When offset+len == fw_len, the fw is written to the HBA.
+ */
+ uint32_t diag_fw;
+ uint32_t offset;/* start offset */
+ uint32_t len; /* num bytes in cur xfer */
+ uint32_t fw_len; /* size of fw in bytes */
+ uint8_t fw_bytes[0];
+};
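The comment above implies a simple sender-side chunking loop. A sketch
of how a consumer of this interface might split a firmware image into
msg_update_fw chunks, assuming an opaque send_chunk() transport
(hypothetical):

    static int
    send_fw_image(const uint8_t *img, uint32_t fw_len, uint32_t max_chunk,
        uint32_t diag_fw,
        int (*send_chunk)(const struct msg_update_fw *, size_t))
    {
        uint32_t off;

        for (off = 0; off < fw_len; off += max_chunk) {
            uint32_t len = min(max_chunk, fw_len - off);
            struct msg_update_fw *m;
            int ret;

            m = kmalloc(sizeof(*m) + len, GFP_KERNEL);
            if (!m)
                return -ENOMEM;
            m->diag_fw = diag_fw;
            m->offset = off;        /* where this chunk lands */
            m->len = len;           /* bytes in this transfer */
            m->fw_len = fw_len;     /* total image size */
            memcpy(m->fw_bytes, img + off, len);
            ret = send_chunk(m, sizeof(*m) + len);
            kfree(m);
            if (ret)
                return ret;
        }
        /* the chunk with offset + len == fw_len triggers the write */
        return 0;
    }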
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 66a8da5d7d08..cebf4f1bb7d9 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -627,6 +627,39 @@ struct els_entry_24xx {
uint32_t rx_len; /* Data segment 1 length. */
};
+struct els_sts_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t comp_status;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t reserved_1;
+
+ uint8_t vp_index;
+ uint8_t sof_type;
+
+ uint32_t rx_xchg_address; /* Receive exchange address. */
+ uint16_t reserved_2;
+
+ uint8_t opcode;
+ uint8_t reserved_3;
+
+ uint8_t port_id[3];
+ uint8_t reserved_4;
+
+ uint16_t reserved_5;
+
+ uint16_t control_flags; /* Control flags. */
+ uint32_t total_byte_count;
+ uint32_t error_subcode_1;
+ uint32_t error_subcode_2;
+};
/*
* ISP queue - Mailbox Command entry structure definition.
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8bc6f53691e9..3a89bc514e2b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -60,6 +60,8 @@ extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
+extern fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t);
/*
* Global Data in qla_os.c source file.
*/
@@ -76,6 +78,7 @@ extern int ql2xiidmaenable;
extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
extern int ql2xfwloadbin;
+extern int ql2xetsenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -94,7 +97,6 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
-extern void qla2x00_abort_fcport_cmds(fc_port_t *);
extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
struct qla_hw_data *);
extern void qla2x00_free_host(struct scsi_qla_host *);
@@ -154,6 +156,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
+extern void qla2x00_ctx_sp_free(srb_t *);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -426,6 +429,8 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
/*
* Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f8e8495b743..a67b2bafb882 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -62,7 +62,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
ctx->free(sp);
}
-static void
+void
qla2x00_ctx_sp_free(srb_t *sp)
{
struct srb_ctx *ctx = sp->ctx;
@@ -338,6 +338,16 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
+ if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
+ /* Issue verify 84xx FW IOCB to complete 84xx initialization */
+ rval = qla84xx_init_chip(vha);
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_ERR, ha,
+ "Unable to initialize ISP84XX.\n");
+ qla84xx_put_chip(vha);
+ }
+ }
+
return (rval);
}
@@ -2216,7 +2226,7 @@ qla2x00_rport_del(void *data)
*
* Returns a pointer to the allocated fcport, or NULL, if none available.
*/
-static fc_port_t *
+fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
fc_port_t *fcport;
@@ -2900,8 +2910,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (qla2x00_is_reserved_id(vha, loop_id))
continue;
- if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
+ if (atomic_read(&vha->loop_down_timer) ||
+ LOOP_TRANSITION(vha)) {
+ atomic_set(&vha->loop_down_timer, 0);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
break;
+ }
if (swl != NULL) {
if (last_dev) {
@@ -4877,6 +4892,15 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
void
-qla81xx_update_fw_options(scsi_qla_host_t *ha)
+qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ql2xetsenable)
+ return;
+
+ /* Enable ETS Burst. */
+ memset(ha->fw_options, 0, sizeof(ha->fw_options));
+ ha->fw_options[2] |= BIT_9;
+ qla2x00_set_fw_options(vha, ha->fw_options);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5ccac0bef76..8299a9891bfe 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1025,6 +1025,119 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
/* Implicit: mbx->mbx10 = 0. */
}
+static void
+qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+ struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+
+ els_iocb->entry_type = ELS_IOCB_TYPE;
+ els_iocb->entry_count = 1;
+ els_iocb->sys_define = 0;
+ els_iocb->entry_status = 0;
+ els_iocb->handle = sp->handle;
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ els_iocb->vp_index = sp->fcport->vp_idx;
+ els_iocb->sof_type = EST_SOFI3;
+ els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+
+ els_iocb->opcode =
+ (((struct srb_bsg *)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
+ bsg_job->request->rqst_data.r_els.els_code :
+ bsg_job->request->rqst_data.h_els.command_code;
+ els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->control_flags = 0;
+ els_iocb->rx_byte_count =
+ cpu_to_le32(bsg_job->reply_payload.payload_len);
+ els_iocb->tx_byte_count =
+ cpu_to_le32(bsg_job->request_payload.payload_len);
+
+ els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ els_iocb->tx_len = cpu_to_le32(sg_dma_len
+ (bsg_job->request_payload.sg_list));
+
+ els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ els_iocb->rx_len = cpu_to_le32(sg_dma_len
+ (bsg_job->reply_payload.sg_list));
+}
+
+static void
+qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ struct scatterlist *sg;
+ int index;
+ uint16_t tot_dsds;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+ int loop_iteration = 0;
+ int cont_iocb_prsnt = 0;
+ int entry_count = 1;
+
+ ct_iocb->entry_type = CT_IOCB_TYPE;
+ ct_iocb->entry_status = 0;
+ ct_iocb->sys_define = 0;
+ ct_iocb->handle = sp->handle;
+
+ ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ ct_iocb->vp_index = sp->fcport->vp_idx;
+ ct_iocb->comp_status = __constant_cpu_to_le16(0);
+
+ ct_iocb->cmd_dsd_count =
+ __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ ct_iocb->timeout = 0;
+ ct_iocb->rsp_dsd_count =
+ __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ ct_iocb->rsp_byte_count =
+ cpu_to_le32(bsg_job->reply_payload.payload_len);
+ ct_iocb->cmd_byte_count =
+ cpu_to_le32(bsg_job->request_payload.payload_len);
+ ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
+ (bsg_job->request_payload.sg_list));
+
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
+ index = 0;
+ tot_dsds = bsg_job->reply_payload.sg_cnt;
+
+ for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ cont_iocb_prsnt = 1;
+ entry_count++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ loop_iteration++;
+ avail_dsds--;
+ }
+ ct_iocb->entry_count = entry_count;
+}
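The CT IOCB carries one response DSD inline and each Continuation Type
1 IOCB appended by the loop adds room for five more, so the entry count
handed to the firmware follows directly from the reply scatter count.
The arithmetic, as a sketch matching the loop above:

    /* 1 inline DSD, then 5 DSDs per continuation IOCB. */
    static inline int
    ct_entry_count(uint16_t rsp_sg_cnt)
    {
        if (rsp_sg_cnt <= 1)
            return 1;
        return 1 + DIV_ROUND_UP(rsp_sg_cnt - 1, 5);
    }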
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -1052,6 +1165,13 @@ qla2x00_start_sp(srb_t *sp)
qla24xx_logout_iocb(sp, pkt):
qla2x00_logout_iocb(sp, pkt);
break;
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ qla24xx_els_iocb(sp, pkt);
+ break;
+ case SRB_CT_CMD:
+ qla24xx_ct_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6fc63b98818c..ab90329ff2e4 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_bsg_fc.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -881,7 +882,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
index);
return NULL;
}
+
req->outstanding_cmds[index] = NULL;
+
done:
return sp;
}
@@ -982,6 +985,100 @@ done_post_logio_done_work:
}
static void
+qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct sts_entry_24xx *pkt, int iocb_type)
+{
+ const char func[] = "ELS_CT_IOCB";
+ const char *type;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ struct srb_bsg *sp_bsg;
+ struct fc_bsg_job *bsg_job;
+ uint16_t comp_status;
+ uint32_t fw_status[3];
+ uint8_t* fw_sts_ptr;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+ sp_bsg = (struct srb_bsg*)sp->ctx;
+ bsg_job = sp_bsg->bsg_job;
+
+ type = NULL;
+ switch (sp_bsg->ctx.type) {
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ type = "els";
+ break;
+ case SRB_CT_CMD:
+ type = "ct pass-through";
+ break;
+ default:
+ qla_printk(KERN_WARNING, ha,
+ "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
+ sp_bsg->ctx.type);
+ return;
+ }
+
+ comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+ fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
+ fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+
+ /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+ * fc payload to the caller
+ */
+ bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
+
+ if (comp_status != CS_COMPLETE) {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len =
+ le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
+ vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count)));
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "error subcode 1=0x%x error subcode 2=0x%x.\n",
+ vha->host_no, sp->handle, type, comp_status,
+ le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1),
+ le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2)));
+ bsg_job->reply->result = DID_ERROR << 16;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
+ }
+ DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+ } else {
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ bsg_job->reply_len = 0;
+ }
+
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
+ (sp_bsg->ctx.type == SRB_CT_CMD))
+ kfree(sp->fcport);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ bsg_job->job_done(bsg_job);
+}
+
+static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
struct logio_entry_24xx *logio)
{
@@ -1749,6 +1846,13 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
+ case CT_IOCB_TYPE:
+ qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+ clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
+ break;
+ case ELS_IOCB_TYPE:
+ qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+ break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@@ -2049,7 +2153,6 @@ qla24xx_msix_default(int irq, void *dev_id)
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
complete(&ha->mbx_intr_comp);
}
-
return IRQ_HANDLED;
}
@@ -2255,10 +2358,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
- else if (ha->flags.inta_enabled) {
+ else if (ha->flags.msi_enabled) {
free_irq(ha->pdev->irq, rsp);
pci_disable_msi(ha->pdev);
- }
+ } else
+ free_irq(ha->pdev->irq, rsp);
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 056e4d4505f3..6e53bdbb1da8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3636,6 +3636,157 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
}
int
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint32_t iter_cnt = 0x1;
+
+ DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+
+ memset(mcp->mb, 0, sizeof(mcp->mb));
+ mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
+ mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
+
+ /* transfer count */
+ mcp->mb[10] = LSW(mreq->transfer_size);
+ mcp->mb[11] = MSW(mreq->transfer_size);
+
+ /* send data address */
+ mcp->mb[14] = LSW(mreq->send_dma);
+ mcp->mb[15] = MSW(mreq->send_dma);
+ mcp->mb[20] = LSW(MSD(mreq->send_dma));
+ mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+ /* receive data address */
+ mcp->mb[16] = LSW(mreq->rcv_dma);
+ mcp->mb[17] = MSW(mreq->rcv_dma);
+ mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+ mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+ /* Iteration count */
+ mcp->mb[18] = LSW(iter_cnt);
+ mcp->mb[19] = MSW(iter_cnt);
+
+ mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
+ MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+ if (IS_QLA81XX(vha->hw))
+ mcp->out_mb |= MBX_2;
+ mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->buf_size = mreq->transfer_size;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x mb[2]=0x%x "
+ "mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x.\n", vha->host_no, rval,
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+ } else {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): done.\n", vha->host_no));
+ }
+
+ /* Copy mailbox information */
+ memcpy(mresp, mcp->mb, 64);
+ mresp[3] = mcp->mb[18];
+ mresp[4] = mcp->mb[19];
+ return rval;
+}
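Both diagnostic commands split each 64-bit DMA address across four
16-bit mailbox registers with the driver's LSW/MSW/MSD helpers. A
worked sketch of that packing, assuming the macros as defined in
qla_def.h (a hypothetical helper; the code above writes the registers
directly):

    static void
    pack_dma_addr(uint16_t *mb, int lo0, int lo1, int hi0, int hi1,
        dma_addr_t addr)
    {
        mb[lo0] = LSW(addr);            /* bits 15:0  */
        mb[lo1] = MSW(addr);            /* bits 31:16 */
        mb[hi0] = LSW(MSD(addr));       /* bits 47:32 */
        mb[hi1] = MSW(MSD(addr));       /* bits 63:48 */
    }

    /* Mirroring the loopback command above:
     *      pack_dma_addr(mcp->mb, 14, 15, 20, 21, mreq->send_dma);
     *      pack_dma_addr(mcp->mb, 16, 17, 6, 7, mreq->rcv_dma);
     */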
+
+int
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+
+ memset(mcp->mb, 0, sizeof(mcp->mb));
+ mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
+ mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
+ if (IS_QLA81XX(ha))
+ mcp->mb[1] |= BIT_15;
+ mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
+ mcp->mb[16] = LSW(mreq->rcv_dma);
+ mcp->mb[17] = MSW(mreq->rcv_dma);
+ mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+ mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+ mcp->mb[10] = LSW(mreq->transfer_size);
+
+ mcp->mb[14] = LSW(mreq->send_dma);
+ mcp->mb[15] = MSW(mreq->send_dma);
+ mcp->mb[20] = LSW(MSD(mreq->send_dma));
+ mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+ mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
+ MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+ if (IS_QLA81XX(ha))
+ mcp->out_mb |= MBX_2;
+
+ mcp->in_mb = MBX_0;
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ mcp->in_mb |= MBX_1;
+ if (IS_QLA81XX(ha))
+ mcp->in_mb |= MBX_3;
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->buf_size = mreq->transfer_size;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
+ vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ } else {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): done.\n", vha->host_no));
+ }
+
+ /* Copy mailbox information */
+ memcpy(mresp, mcp->mb, 32);
+ return rval;
+}
+
+int
+qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
+ uint16_t *cmd_status)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
+ ha->host_no, enable_diagnostic));
+
+ mcp->mb[0] = MBC_ISP84XX_RESET;
+ mcp->mb[1] = enable_diagnostic;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ /* Return mailbox statuses. */
+ *cmd_status = mcp->mb[0];
+ if (rval != QLA_SUCCESS)
+ DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
+ rval));
+ else
+ DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+
+ return rval;
+}
+
+int
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
{
int rval;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8529eb1f3cd4..46720b23028f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -107,6 +107,12 @@ MODULE_PARM_DESC(ql2xfwloadbin,
" 1 -- load firmware from flash.\n"
" 0 -- use default semantics.\n");
+int ql2xetsenable;
+module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xetsenable,
+ "Enables firmware ETS burst. "
+ "Default is 0 - skip ETS enablement.");
+
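With the new knob, ETS bursts stay off unless requested at load time,
e.g. "modprobe qla2xxx ql2xetsenable=1"; qla81xx_update_fw_options()
(see the qla_init.c hunk above) then sets BIT_9 in fw_options[2] before
pushing the options to the firmware.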
/*
* SCSI host template entry points
*/
@@ -682,44 +688,6 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
return (return_status);
}
-void
-qla2x00_abort_fcport_cmds(fc_port_t *fcport)
-{
- int cnt;
- unsigned long flags;
- srb_t *sp;
- scsi_qla_host_t *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = vha->req;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (!sp)
- continue;
- if (sp->fcport != fcport)
- continue;
- if (sp->ctx)
- continue;
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed -- %lx\n",
- sp->cmd->serial_number));
- } else {
- if (qla2x00_eh_wait_on_command(sp->cmd) !=
- QLA_SUCCESS)
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed while waiting -- %lx\n",
- sp->cmd->serial_number));
- }
- spin_lock_irqsave(&ha->hardware_lock, flags);
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
/**************************************************************************
* qla2xxx_eh_abort
*
@@ -1095,6 +1063,20 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
+ if (ha->flags.enable_target_reset) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ ret = ha->isp_ops->target_reset(fcport, 0, 0);
+ if (ret != QLA_SUCCESS) {
+ DEBUG2_3(printk("%s(%ld): bus_reset failed: "
+ "target_reset=%d d_id=%x.\n", __func__,
+ vha->host_no, ret, fcport->d_id.b24));
+ }
+ }
+ }
+
if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
@@ -1117,19 +1099,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
qla2x00_wait_for_loop_ready(vha);
}
- if (ha->flags.enable_target_reset) {
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
- ret = ha->isp_ops->target_reset(fcport, 0, 0);
- if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): bus_reset failed: "
- "target_reset=%d d_id=%x.\n", __func__,
- vha->host_no, ret, fcport->d_id.b24));
- }
- }
- }
/* Issue marker command only when we are going to start the I/O */
vha->marker_needed = 1;
@@ -1160,8 +1129,19 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- del_timer_sync(&ctx->timer);
- ctx->free(sp);
+ if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
+ del_timer_sync(&ctx->timer);
+ ctx->free(sp);
+ } else {
+ struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
+ if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+ kfree(sp->fcport);
+ sp_bsg->bsg_job->req->errors = 0;
+ sp_bsg->bsg_job->reply->result = res;
+ sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ }
}
}
}
@@ -1258,7 +1238,7 @@ qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
break;
default:
- return EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
return sdev->queue_depth;
@@ -1818,7 +1798,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set EEH reset type to fundamental if required by hba */
if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
pdev->needs_freset = 1;
- pci_save_state(pdev);
}
/* Configure PCI I/O space */
@@ -1970,11 +1949,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_channel = MAX_BUSES - 1;
host->max_lun = MAX_LUNS;
host->transportt = qla2xxx_transport_template;
+ sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
goto probe_init_failed;
+
+ pci_save_state(pdev);
+
/* Alloc arrays of request and response ring ptrs */
que_init:
if (!qla2x00_alloc_queues(ha)) {
@@ -2176,6 +2159,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
kfree(ha);
ha = NULL;
+ pci_disable_pcie_error_reporting(pdev);
+
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -3310,6 +3295,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
ha->flags.eeh_busy = 1;
+ qla2x00_free_irqs(vha);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
@@ -3363,10 +3349,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = base_vha->hw;
- int rc;
+ struct rsp_que *rsp;
+ int rc, retries = 10;
DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+ /* Workaround: the qla2xxx driver accesses hardware early in
+ * recovery and needs the error state to be pci_channel_io_normal;
+ * otherwise mailbox commands time out.
+ */
+ pdev->error_state = pci_channel_io_normal;
+
+ pci_restore_state(pdev);
+
+ /* pci_restore_state() clears the device's saved_state flag;
+ * save the state again so a later restore still works.
+ */
+ pci_save_state(pdev);
+
if (ha->mem_only)
rc = pci_enable_device_mem(pdev);
else
@@ -3378,27 +3378,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
return ret;
}
+ rsp = ha->rsp_q_map[0];
+ if (qla2x00_request_irqs(ha, rsp))
+ return ret;
+
if (ha->isp_ops->pci_config(base_vha))
return ret;
-#ifdef QL_DEBUG_LEVEL_17
- {
- uint8_t b;
- uint32_t i;
+ while (ha->flags.mbox_busy && retries--)
+ msleep(1000);
- printk("slot_reset_1: ");
- for (i = 0; i < 256; i++) {
- pci_read_config_byte(ha->pdev, i, &b);
- printk("%s%02x", (i%16) ? " " : "\n", b);
- }
- printk("\n");
- }
-#endif
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
DEBUG17(qla_printk(KERN_WARNING, ha,
"slot_reset-return:ret=%x\n", ret));
@@ -3422,8 +3418,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
}
ha->flags.eeh_busy = 0;
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
static struct pci_error_handlers qla2xxx_err_handler = {
@@ -3536,4 +3530,3 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);
-MODULE_FIRMWARE(FW_FILE_ISP81XX);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ed36279a33c1..8d2fc2fa7a6b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k10"
+#define QLA2XXX_VERSION "8.03.02-k1"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 1
-#define QLA_DRIVER_BETA_VER 0
+#define QLA_DRIVER_PATCH_VER 2
+#define QLA_DRIVER_BETA_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index af8c3233e8ae..92329a461c68 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -844,10 +844,10 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no,
__func__));
if (ql4xxx_lock_flash(ha) != QLA_SUCCESS)
- return (QLA_ERROR);
+ return QLA_ERROR;
if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) {
ql4xxx_unlock_flash(ha);
- return (QLA_ERROR);
+ return QLA_ERROR;
}
/* Get EEPRom Parameters from NVRAM and validate */
@@ -858,20 +858,18 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
} else {
- /*
- * QLogic adapters should always have a valid NVRAM.
- * If not valid, do not load.
- */
dev_warn(&ha->pdev->dev,
"scsi%ld: %s: EEProm checksum invalid. "
"Please update your EEPROM\n", ha->host_no,
__func__);
- /* set defaults */
+ /* Attempt to set defaults */
if (is_qla4010(ha))
extHwConfig.Asuint32_t = 0x1912;
else if (is_qla4022(ha) | is_qla4032(ha))
extHwConfig.Asuint32_t = 0x0023;
+ else
+ return QLA_ERROR;
}
DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
ha->host_no, __func__, extHwConfig.Asuint32_t));
@@ -884,7 +882,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
ql4xxx_unlock_nvram(ha);
ql4xxx_unlock_flash(ha);
- return (QLA_SUCCESS);
+ return QLA_SUCCESS;
}
static void qla4x00_pci_config(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 8e5c169b03fb..bd88349b8526 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -149,6 +149,7 @@ static struct {
{ RAID_LEVEL_0, "raid0" },
{ RAID_LEVEL_1, "raid1" },
{ RAID_LEVEL_10, "raid10" },
+ { RAID_LEVEL_1E, "raid1e" },
{ RAID_LEVEL_3, "raid3" },
{ RAID_LEVEL_4, "raid4" },
{ RAID_LEVEL_5, "raid5" },
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a60da5555577..513661f45e5f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1026,55 +1026,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
* responsible for calling kfree() on this pointer when it is no longer
* needed. If we cannot retrieve the VPD page this routine returns %NULL.
*/
-unsigned char *scsi_get_vpd_page(struct scsi_device *sdev, u8 page)
+int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
+ int buf_len)
{
int i, result;
- unsigned int len;
- const unsigned int init_vpd_len = 255;
- unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
-
- if (!buf)
- return NULL;
/* Ask for all the pages supported by this device */
- result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len);
+ result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
if (result)
goto fail;
/* If the user actually wanted this page, we can skip the rest */
if (page == 0)
- return buf;
+ return -EINVAL;
- for (i = 0; i < buf[3]; i++)
+ for (i = 0; i < min((int)buf[3], buf_len - 4); i++)
if (buf[i + 4] == page)
goto found;
+
+ if (i < buf[3] && i >= buf_len - 4)
+ /* ran off the end of the buffer, give us benefit of doubt */
+ goto found;
/* The device claims it doesn't support the requested page */
goto fail;
found:
- result = scsi_vpd_inquiry(sdev, buf, page, 255);
+ result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
if (result)
goto fail;
- /*
- * Some pages are longer than 255 bytes. The actual length of
- * the page is returned in the header.
- */
- len = ((buf[2] << 8) | buf[3]) + 4;
- if (len <= init_vpd_len)
- return buf;
-
- kfree(buf);
- buf = kmalloc(len, GFP_KERNEL);
- result = scsi_vpd_inquiry(sdev, buf, page, len);
- if (result)
- goto fail;
-
- return buf;
+ return 0;
fail:
- kfree(buf);
- return NULL;
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
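The signature change moves buffer ownership to the caller: instead of
returning a freshly kmalloc'd page, the function now fills a
caller-supplied buffer and returns 0 or -EINVAL. A sketch of the
resulting caller-side pattern (the helper and the 255-byte size are
illustrative choices):

    static int
    read_unit_serial(struct scsi_device *sdev)
    {
        const int vpd_len = 255;
        unsigned char *buf = kmalloc(vpd_len, GFP_KERNEL);
        int ret;

        if (!buf)
            return -ENOMEM;
        ret = scsi_get_vpd_page(sdev, 0x80, buf, vpd_len);
        if (ret == 0) {
            int len = (buf[2] << 8) + buf[3];   /* page length */
            /* consume buf[4] .. buf[4 + len - 1] here */
            (void)len;
        }
        kfree(buf);
        return ret;
    }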
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c6642423cc67..56977097de9f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* we already took a copy of the original into rq->errors which
* is what gets returned to the user
*/
- if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
- if (!(req->cmd_flags & REQ_QUIET))
+ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+ /* If the sense is ATA PASS-THROUGH INFORMATION AVAILABLE,
+ * skip the print since the caller wants the ATA registers.
+ * This only occurs on SCSI ATA PASS-THROUGH commands when
+ * CK_COND=1.
+ */
+ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+ ;
+ else if (!(req->cmd_flags & REQ_QUIET))
scsi_print_sense("", cmd);
result = 0;
/* BLOCK_PC may have set error */
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index 998cb5be6833..6266a5d73d0f 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -5,7 +5,7 @@
#define SAS_PHY_ATTRS 17
#define SAS_PORT_ATTRS 1
#define SAS_RPORT_ATTRS 7
-#define SAS_END_DEV_ATTRS 3
+#define SAS_END_DEV_ATTRS 5
#define SAS_EXPANDER_ATTRS 7
struct sas_internal {
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 012f73a96880..f697229ae5a9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1339,8 +1339,10 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
sdev = scsi_alloc_sdev(starget, 0, NULL);
if (!sdev)
return 0;
- if (scsi_device_get(sdev))
+ if (scsi_device_get(sdev)) {
+ __scsi_remove_device(sdev);
return 0;
+ }
}
sprintf(devname, "host %d channel %d id %d",
@@ -1907,10 +1909,9 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
goto out;
sdev = scsi_alloc_sdev(starget, 0, NULL);
- if (sdev) {
- sdev->sdev_gendev.parent = get_device(&starget->dev);
+ if (sdev)
sdev->borken = 0;
- } else
+ else
scsi_target_reap(starget);
put_device(&starget->dev);
out:
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 5a065055e68a..a4936c4e2f46 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -878,7 +878,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
+ error = scsi_device_set_state(sdev, SDEV_RUNNING);
+ if (error)
return error;
error = scsi_target_add(starget);
@@ -889,13 +890,13 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
error = device_add(&sdev->sdev_gendev);
if (error) {
printk(KERN_INFO "error 1\n");
- goto out_remove;
+ return error;
}
error = device_add(&sdev->sdev_dev);
if (error) {
printk(KERN_INFO "error 2\n");
device_del(&sdev->sdev_gendev);
- goto out_remove;
+ return error;
}
transport_add_device(&sdev->sdev_gendev);
sdev->is_visible = 1;
@@ -910,14 +911,14 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
else
error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
if (error)
- goto out_remove;
+ return error;
if (sdev->host->hostt->change_queue_type)
error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
else
error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
if (error)
- goto out_remove;
+ return error;
error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
@@ -933,16 +934,11 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
error = device_create_file(&sdev->sdev_gendev,
sdev->host->hostt->sdev_attrs[i]);
if (error)
- goto out_remove;
+ return error;
}
}
- return 0;
-
- out_remove:
- __scsi_remove_device(sdev);
return error;
-
}
void __scsi_remove_device(struct scsi_device *sdev)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 653f22a8deb9..79660ee3e211 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -475,7 +475,8 @@ MODULE_PARM_DESC(dev_loss_tmo,
"Maximum number of seconds that the FC transport should"
" insulate the loss of a remote port. Once this value is"
" exceeded, the scsi target is removed. Value should be"
- " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
+ " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
+ " fast_io_fail_tmo is not set.");
/*
* Netlink Infrastructure
@@ -842,9 +843,17 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
(rport->port_state == FC_PORTSTATE_NOTPRESENT))
return -EBUSY;
val = simple_strtoul(buf, &cp, 0);
- if ((*cp && (*cp != '\n')) ||
- (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
+ if ((*cp && (*cp != '\n')) || (val < 0))
return -EINVAL;
+
+ /*
+ * If fast_io_fail is off we have to cap
+ * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
+ */
+ if (rport->fast_io_fail_tmo == -1 &&
+ val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
+ return -EINVAL;
+
i->f->set_rport_dev_loss_tmo(rport, val);
return count;
}
@@ -925,9 +934,16 @@ store_fc_rport_fast_io_fail_tmo(struct device *dev,
rport->fast_io_fail_tmo = -1;
else {
val = simple_strtoul(buf, &cp, 0);
- if ((*cp && (*cp != '\n')) ||
- (val < 0) || (val >= rport->dev_loss_tmo))
+ if ((*cp && (*cp != '\n')) || (val < 0))
return -EINVAL;
+ /*
+ * Cap fast_io_fail by dev_loss_tmo or
+ * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
+ */
+ if ((val >= rport->dev_loss_tmo) ||
+ (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
+ return -EINVAL;
+
rport->fast_io_fail_tmo = val;
}
return count;
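
Between the two stores above, the timeouts must satisfy a joint
constraint: with fast_io_fail off (-1), dev_loss_tmo is capped at
SCSI_DEVICE_BLOCK_MAX_TIMEOUT; otherwise fast_io_fail_tmo must stay below
both dev_loss_tmo and that cap, while dev_loss_tmo itself may exceed it.
A sketch of the combined rule (hypothetical predicate, not part of the
patch; negative values are already rejected by the stores themselves):

	static bool fc_tmo_pair_valid(unsigned long dev_loss, long fast_io_fail)
	{
		if (fast_io_fail == -1)		/* fast_io_fail disabled */
			return dev_loss <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT;

		return fast_io_fail < dev_loss &&
		       fast_io_fail <= SCSI_DEVICE_BLOCK_MAX_TIMEOUT;
	}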
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f27e52d963d3..927e99cb7225 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -155,6 +155,17 @@ static struct {
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
+static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev)
+{
+ struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target);
+ struct sas_end_device *rdev;
+
+ BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
+
+ rdev = rphy_to_end_device(rphy);
+ return rdev;
+}
+
static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
@@ -358,6 +369,85 @@ void sas_remove_host(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(sas_remove_host);
+/**
+ * sas_tlr_supported - check the TLR bit in VPD page 0x90
+ * @sdev: scsi device struct
+ *
+ * Check whether Transport Layer Retries (TLR) are supported.
+ * If the TLR bit in VPD page 0x90 is set, TLR is supported.
+ *
+ */
+unsigned int
+sas_tlr_supported(struct scsi_device *sdev)
+{
+ const int vpd_len = 32;
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+ char *buffer = kzalloc(vpd_len, GFP_KERNEL);
+ int ret = 0;
+
+ if (!buffer || scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len))
+ goto out;
+
+ /*
+ * Magic numbers: the VPD protocol page (0x90) has a 4-byte header
+ * followed by one entry per device port, and the TLR bit sits at
+ * offset 8 within each port entry. Taking the first port, that puts
+ * the bit at absolute offset 12.
+ */
+ ret = buffer[12] & 0x01;
+
+ out:
+ kfree(buffer);
+ rdev->tlr_supported = ret;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sas_tlr_supported);
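
The byte-12 "magic number" decomposes as 4 (page header) + 8 (TLR bit
offset within the first port entry). A sketch generalizing to port N,
under the added assumption, not stated in the comment above, that every
port entry has one fixed length (hypothetical helper):

	static int vpd90_port_tlr_bit(const unsigned char *page, int port,
				      int entry_len)
	{
		/* 4-byte header, then fixed-size entries, bit 0 at +8 */
		return page[4 + port * entry_len + 8] & 0x01;
	}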
+
+/**
+ * sas_disable_tlr - clear the TLR enabled flag
+ * @sdev: scsi device struct
+ *
+ * Set the tlr_enabled flag to 0.
+ *
+ */
+void
+sas_disable_tlr(struct scsi_device *sdev)
+{
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+
+ rdev->tlr_enabled = 0;
+}
+EXPORT_SYMBOL_GPL(sas_disable_tlr);
+
+/**
+ * sas_enable_tlr - set the TLR enabled flag
+ * @sdev: scsi device struct
+ *
+ * Set the tlr_enabled flag to 1 if the device supports TLR.
+ *
+ */
+void sas_enable_tlr(struct scsi_device *sdev)
+{
+ unsigned int tlr_supported = sas_tlr_supported(sdev);
+
+ if (tlr_supported) {
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+
+ rdev->tlr_enabled = 1;
+ }
+}
+EXPORT_SYMBOL_GPL(sas_enable_tlr);
+
+unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
+{
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
+
+ return rdev->tlr_enabled;
+}
+EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
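
Together these exports form a small TLR API for LLDs: probe and cache
support once, then gate per-command TLR use on the cached flag. A minimal
usage sketch from a hypothetical driver (the mpt2sas updates in this
merge use the same entry points):

	static int example_slave_configure(struct scsi_device *sdev)
	{
		sas_enable_tlr(sdev);	/* no-op unless VPD 0x90 reports TLR */
		return 0;
	}

	/* ...and in the I/O path, the cached flag gates TLR use: */
	static bool example_use_tlr(struct scsi_device *sdev)
	{
		return sas_is_tlr_enabled(sdev);
	}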
/*
* SAS Phy attributes
@@ -1146,15 +1236,10 @@ sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
int sas_read_port_mode_page(struct scsi_device *sdev)
{
char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata;
- struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target);
- struct sas_end_device *rdev;
+ struct sas_end_device *rdev = sas_sdev_to_rdev(sdev);
struct scsi_mode_data mode_data;
int res, error;
- BUG_ON(rphy->identify.device_type != SAS_END_DEVICE);
-
- rdev = rphy_to_end_device(rphy);
-
if (!buffer)
return -ENOMEM;
@@ -1207,6 +1292,10 @@ sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout,
"%d\n", int);
sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout,
"%d\n", int);
+sas_end_dev_simple_attr(tlr_supported, tlr_supported,
+ "%d\n", int);
+sas_end_dev_simple_attr(tlr_enabled, tlr_enabled,
+ "%d\n", int);
static DECLARE_TRANSPORT_CLASS(sas_expander_class,
"sas_expander", NULL, NULL, NULL);
@@ -1733,6 +1822,8 @@ sas_attach_transport(struct sas_function_template *ft)
SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning);
SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout);
SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout);
+ SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported);
+ SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled);
i->end_dev_attrs[count] = NULL;
count = 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 255da53e5a01..1dd4d8407694 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1196,19 +1196,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
SCpnt->result = 0;
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
break;
- case ABORTED_COMMAND:
- if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
- scsi_print_result(SCpnt);
- scsi_print_sense("sd", SCpnt);
+ case ABORTED_COMMAND: /* DIF: Target detected corruption */
+ case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
+ if (sshdr.asc == 0x10)
good_bytes = sd_completed_bytes(SCpnt);
- }
- break;
- case ILLEGAL_REQUEST:
- if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
- scsi_print_result(SCpnt);
- scsi_print_sense("sd", SCpnt);
- good_bytes = sd_completed_bytes(SCpnt);
- }
break;
default:
break;
@@ -1218,8 +1209,19 @@ static int sd_done(struct scsi_cmnd *SCpnt)
sd_dif_complete(SCpnt, good_bytes);
if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
- == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd)
+ == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
+
+ /* We have to print a failed command here as the
+ * extended CDB gets freed before scsi_io_completion()
+ * is called.
+ */
+ if (result)
+ scsi_print_command(SCpnt);
+
mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ SCpnt->cmnd = NULL;
+ SCpnt->cmd_len = 0;
+ }
return good_bytes;
}
@@ -1946,13 +1948,13 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
{
struct request_queue *q = sdkp->disk->queue;
unsigned int sector_sz = sdkp->device->sector_size;
- char *buffer;
+ const int vpd_len = 32;
+ unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
- /* Block Limits VPD */
- buffer = scsi_get_vpd_page(sdkp->device, 0xb0);
-
- if (buffer == NULL)
- return;
+ if (!buffer ||
+ /* Block Limits VPD */
+ scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
+ goto out;
blk_queue_io_min(sdkp->disk->queue,
get_unaligned_be16(&buffer[6]) * sector_sz);
@@ -1984,6 +1986,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
get_unaligned_be32(&buffer[32]) & ~(1 << 31);
}
+ out:
kfree(buffer);
}
@@ -1993,20 +1996,23 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
*/
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
- char *buffer;
+ unsigned char *buffer;
u16 rot;
+ const int vpd_len = 32;
- /* Block Device Characteristics VPD */
- buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
+ buffer = kmalloc(vpd_len, GFP_KERNEL);
- if (buffer == NULL)
- return;
+ if (!buffer ||
+ /* Block Device Characteristics VPD */
+ scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
+ goto out;
rot = get_unaligned_be16(&buffer[4]);
if (rot == 1)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+ out:
kfree(buffer);
}
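
For context on the decode above: bytes 4-5 of the Block Device
Characteristics page hold the MEDIUM ROTATION RATE, where the special
value 1 means non-rotating media (an SSD), hence the QUEUE_FLAG_NONROT
hint. A sketch of the check in isolation (hypothetical helper):

	/* True if VPD page 0xb1 reports non-rotating (solid-state) media */
	static bool example_is_nonrot(const unsigned char *vpd_b1)
	{
		return get_unaligned_be16(&vpd_b1[4]) == 1;
	}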
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 55b034b72708..1d7a8780e00c 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -448,13 +448,17 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
.addr = 0,
};
- buf = scsi_get_vpd_page(sdev, 0x83);
- if (!buf)
- return;
+ buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
+ if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
+ goto free;
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
vpd_len = ((buf[2] << 8) | buf[3]) + 4;
+ kfree(buf);
+ buf = kmalloc(vpd_len, GFP_KERNEL);
+ if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, vpd_len))
+ goto free;
desc = buf + 4;
while (desc < buf + vpd_len) {
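
The ses.c change above uses a two-pass VPD read: fetch a short prefix,
compute the full page length from the big-endian length field at bytes
2-3 plus the 4-byte header, then reallocate and re-read. A sketch of the
pattern (hypothetical helper; INIT_ALLOC_SIZE is ses.c's small
first-pass size):

	static unsigned char *example_read_full_vpd(struct scsi_device *sdev,
						    u8 page)
	{
		unsigned char *buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
		int len;

		if (!buf || scsi_get_vpd_page(sdev, page, buf, INIT_ALLOC_SIZE))
			goto fail;

		len = ((buf[2] << 8) | buf[3]) + 4;	/* header + payload */
		kfree(buf);

		buf = kmalloc(len, GFP_KERNEL);
		if (!buf || scsi_get_vpd_page(sdev, page, buf, len))
			goto fail;
		return buf;

	fail:
		kfree(buf);	/* kfree(NULL) is a no-op */
		return NULL;
	}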
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 54023d41fd15..26e8e0e6b8dd 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1070,7 +1070,7 @@ static int option_setup(char *str) {
char *cur = str;
int i = 1;
- while (cur && isdigit(*cur) && i <= MAX_INT_PARAM) {
+ while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
ints[i++] = simple_strtoul(cur, NULL, 0);
if ((cur = strchr(cur, ',')) != NULL) cur++;
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index d2604c813a20..e4ac5829b637 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1069,7 +1069,8 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}
-static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
+ unsigned int *irq)
{
struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
int ret;