-rw-r--r-- Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml | 68
-rw-r--r-- Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 1
-rw-r--r-- Documentation/scsi/scsi_mid_low_api.txt | 3
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 1
-rw-r--r-- drivers/s390/scsi/Makefile | 2
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 12
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 8
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 4
-rw-r--r-- drivers/s390/scsi/zfcp_diag.c | 305
-rw-r--r-- drivers/s390/scsi/zfcp_diag.h | 101
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 1
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 73
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.h | 21
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_sysfs.c | 170
-rw-r--r-- drivers/scsi/NCR5380.c | 37
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 11
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 23
-rw-r--r-- drivers/scsi/aacraid/comminit.c | 5
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 21
-rw-r--r-- drivers/scsi/aacraid/linit.c | 35
-rw-r--r-- drivers/scsi/aacraid/src.c | 10
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 6
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 4
-rw-r--r-- drivers/scsi/atari_scsi.c | 6
-rw-r--r-- drivers/scsi/atp870u.c | 2
-rw-r--r-- drivers/scsi/bfa/bfad.c | 3
-rw-r--r-- drivers/scsi/bfa/bfad_attr.c | 4
-rw-r--r-- drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | 2
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r-- drivers/scsi/csiostor/csio_hw.c | 20
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 7
-rw-r--r-- drivers/scsi/csiostor/csio_lnode.c | 18
-rw-r--r-- drivers/scsi/csiostor/csio_mb.c | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 28
-rw-r--r-- drivers/scsi/cxlflash/main.c | 2
-rw-r--r-- drivers/scsi/esas2r/esas2r_flash.c | 1
-rw-r--r-- drivers/scsi/fnic/fnic_scsi.c | 3
-rw-r--r-- drivers/scsi/fnic/vnic_dev.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas.h | 67
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 376
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 6
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 13
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 30
-rw-r--r-- drivers/scsi/hosts.c | 19
-rw-r--r-- drivers/scsi/ips.c | 2
-rw-r--r-- drivers/scsi/isci/port_config.c | 2
-rw-r--r-- drivers/scsi/isci/remote_device.c | 2
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 40
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 298
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 18
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 7
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 28
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 118
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 57
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 200
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 31
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 954
-rw-r--r-- drivers/scsi/lpfc/lpfc_logmsg.h | 17
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_mem.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 149
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 85
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 103
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 43
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 391
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 42
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/mac_scsi.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 3
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 8
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 7
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 36
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 15
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 344
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.h | 9
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 12
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 2
-rw-r--r-- drivers/scsi/ncr53c8xx.c | 2
-rw-r--r-- drivers/scsi/nsp32.c | 2
-rw-r--r-- drivers/scsi/pcmcia/Kconfig | 2
-rw-r--r-- drivers/scsi/pcmcia/nsp_cs.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 20
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 131
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 36
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 70
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 24
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 451
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.h | 3
-rw-r--r-- drivers/scsi/qedf/qedf_dbg.h | 2
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 8
-rw-r--r-- drivers/scsi/qedi/qedi_dbg.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 34
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 66
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 140
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 12
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 106
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 36
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 15
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 11
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 174
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.c | 29
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 3
-rw-r--r-- drivers/scsi/scsi.c | 6
-rw-r--r-- drivers/scsi/scsi_debug.c | 9
-rw-r--r-- drivers/scsi/scsi_lib.c | 45
-rw-r--r-- drivers/scsi/scsi_logging.c | 10
-rw-r--r-- drivers/scsi/scsi_priv.h | 2
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 22
-rw-r--r-- drivers/scsi/scsi_trace.c | 124
-rw-r--r-- drivers/scsi/sd.c | 4
-rw-r--r-- drivers/scsi/sg.c | 91
-rw-r--r-- drivers/scsi/smartpqi/smartpqi.h | 77
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 437
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_sas_transport.c | 22
-rw-r--r-- drivers/scsi/sun3_scsi.c | 4
-rw-r--r-- drivers/scsi/ufs/Kconfig | 10
-rw-r--r-- drivers/scsi/ufs/Makefile | 1
-rw-r--r-- drivers/scsi/ufs/ti-j721e-ufs.c | 90
-rw-r--r-- drivers/scsi/ufs/ufs-hisi.c | 5
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.c | 3
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.c | 53
-rw-r--r-- drivers/scsi/ufs/ufs-qcom.h | 3
-rw-r--r-- drivers/scsi/ufs/ufs-sysfs.c | 15
-rw-r--r-- drivers/scsi/ufs/ufs_bsg.c | 1
-rw-r--r-- drivers/scsi/ufs/ufshcd-dwc.c | 2
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 1
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 214
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 12
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 2
-rw-r--r-- drivers/scsi/zorro_esp.c | 11
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 3
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 24
-rw-r--r-- drivers/target/iscsi/iscsi_target_auth.c | 232
-rw-r--r-- drivers/target/iscsi/iscsi_target_auth.h | 17
-rw-r--r-- drivers/target/iscsi/iscsi_target_parameters.h | 3
-rw-r--r-- drivers/target/target_core_fabric_lib.c | 2
-rw-r--r-- drivers/target/target_core_tpg.c | 12
-rw-r--r-- drivers/target/target_core_transport.c | 28
-rw-r--r-- drivers/target/target_core_user.c | 6
-rw-r--r-- drivers/target/target_core_xcopy.c | 1
-rw-r--r-- drivers/usb/storage/ene_ub6250.c | 2
-rw-r--r-- drivers/usb/storage/transport.c | 3
-rw-r--r-- drivers/usb/storage/uas.c | 1
-rw-r--r-- include/scsi/iscsi_proto.h | 1
-rw-r--r-- include/scsi/scsi_cmnd.h | 5
-rw-r--r-- include/scsi/scsi_device.h | 5
-rw-r--r-- include/scsi/scsi_host.h | 19
-rw-r--r-- include/target/target_core_base.h | 1
-rw-r--r-- include/uapi/linux/chio.h | 11
163 files changed, 5656 insertions, 1987 deletions
diff --git a/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml b/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml
new file mode 100644
index 000000000000..c8a2a92074df
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/ti,j721e-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI J721e UFS Host Controller Glue Driver
+
+maintainers:
+ - Vignesh Raghavendra <vigneshr@ti.com>
+
+properties:
+ compatible:
+ items:
+ - const: ti,j721e-ufs
+
+ reg:
+ maxItems: 1
+ description: address of TI UFS glue registers
+
+ clocks:
+ maxItems: 1
+ description: phandle to the M-PHY clock
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - power-domains
+
+patternProperties:
+ "^ufs@[0-9a-f]+$":
+ type: object
+ description: |
+ Cadence UFS controller node must be the child node. Refer to
+ Documentation/devicetree/bindings/ufs/cdns,ufshc.txt for the binding
+ documentation of the child node
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ ufs_wrapper: ufs-wrapper@4e80000 {
+ compatible = "ti,j721e-ufs";
+ reg = <0x0 0x4e80000 0x0 0x100>;
+ power-domains = <&k3_pds 277>;
+ clocks = <&k3_clks 277 1>;
+ assigned-clocks = <&k3_clks 277 1>;
+ assigned-clock-parents = <&k3_clks 277 4>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ufs@4e84000 {
+ compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
+ reg = <0x0 0x4e84000 0x0 0x10000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ freq-table-hz = <19200000 19200000>;
+ power-domains = <&k3_pds 277>;
+ clocks = <&k3_clks 277 1>;
+ assigned-clocks = <&k3_clks 277 1>;
+ assigned-clock-parents = <&k3_clks 277 4>;
+ clock-names = "core_clk";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index d78ef63935f9..415ccdd7442d 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -13,6 +13,7 @@ Required properties:
"qcom,msm8996-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,msm8998-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+ "qcom,sm8150-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
- interrupts : <interrupt mapping for UFS host controller IRQ>
- reg : <registers mapping>
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index c1dd4939f4ae..2a4be1c3e6db 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1084,7 +1084,8 @@ of interest:
commands to the adapter.
this_id - scsi id of host (scsi initiator) or -1 if not known
sg_tablesize - maximum scatter gather elements allowed by host.
- 0 implies scatter gather not supported by host
+ Set this to SG_ALL or less to avoid chained SG lists.
+ Must be at least 1.
max_sectors - maximum number of sectors (usually 512 bytes) allowed
in a single SCSI command. The default value of 0 leads
to a setting of SCSI_DEFAULT_MAX_SECTORS (defined in
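A minimal sketch of what the new wording implies for a low-level driver's host
template (illustrative names and values, not part of this series): a host
without real scatter-gather support no longer sets SG_NONE but advertises a
single segment, which is what the atari_scsi conversion below does for the
Falcon.

    #include <linux/module.h>
    #include <scsi/scsi_host.h>

    /*
     * Hypothetical host template honoring the new minimum:
     * sg_tablesize must be at least 1, and SG_NONE is gone.
     */
    static struct scsi_host_template example_sht = {
            .module         = THIS_MODULE,
            .name           = "example",
            .can_queue      = 1,
            .this_id        = 7,
            .sg_tablesize   = 1,    /* single segment: no scatter-gather */
            .max_sectors    = 128,
    };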
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index ebecab8c3f36..135173c8d138 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -219,7 +219,6 @@ struct arasan_cf_dev {
static struct scsi_host_template arasan_cf_sht = {
ATA_BASE_SHT(DRIVER_NAME),
- .sg_tablesize = SG_NONE,
.dma_boundary = 0xFFFFFFFFUL,
};
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index 9dda431ec8f3..352056eb0dd1 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -5,6 +5,6 @@
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
- zfcp_unit.o
+ zfcp_unit.o zfcp_diag.o
obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e390f8c6d5f3..09ec846fe01d 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
/*
@@ -25,6 +25,7 @@
* Martin Petermann
* Sven Schuetz
* Steffen Maier
+ * Benjamin Block
*/
#define KMSG_COMPONENT "zfcp"
@@ -36,6 +37,7 @@
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -356,6 +358,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->erp_action.adapter = adapter;
+ if (zfcp_diag_adapter_setup(adapter))
+ goto failed;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -402,6 +407,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ if (zfcp_diag_sysfs_setup(adapter))
+ goto failed;
+
/* report size limit per scatter-gather segment */
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
@@ -426,6 +434,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
+ zfcp_diag_sysfs_destroy(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
@@ -449,6 +458,7 @@ void zfcp_adapter_release(struct kref *ref)
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
zfcp_fc_gs_destroy(adapter);
zfcp_free_low_mem_buffers(adapter);
+ zfcp_diag_adapter_free(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index dccdb41bed8c..1234294700c4 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -95,11 +95,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
- if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
- rec->pl_len = q_head->log_length;
- zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
- rec->pl_len, "fsf_res", req->req_id);
- }
+ rec->pl_len = q_head->log_length;
+ zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+ rec->pl_len, "fsf_res", req->req_id);
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 87d2f47a6990..8cc0eefe4ccc 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_DEF_H
@@ -86,6 +86,7 @@
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
+#define ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE 0x00020000
/************************* STRUCTURE DEFINITIONS *****************************/
@@ -197,6 +198,7 @@ struct zfcp_adapter {
struct device_dma_parameters dma_parms;
struct zfcp_fc_events events;
unsigned long next_port_scan;
+ struct zfcp_diag_adapter *diagnostics;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_diag.c b/drivers/s390/scsi/zfcp_diag.c
new file mode 100644
index 000000000000..67a8f4e57db1
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Functions to handle diagnostics.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/kernfs.h>
+#include <linux/sysfs.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "zfcp_diag.h"
+#include "zfcp_ext.h"
+#include "zfcp_def.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);
+
+/**
+ * zfcp_diag_adapter_setup() - Set up storage for adapter diagnostics.
+ * @adapter: the adapter to set up diagnostics for.
+ *
+ * Creates the data-structures to store the diagnostics for an adapter. This
+ * overwrites whatever was stored before at &zfcp_adapter->diagnostics!
+ *
+ * Return:
+ * * 0 - Everything is OK
+ * * -ENOMEM - Could not allocate all/parts of the data-structures;
+ * &zfcp_adapter->diagnostics remains unchanged
+ */
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter *diag;
+ struct zfcp_diag_header *hdr;
+
+ diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+ if (diag == NULL)
+ return -ENOMEM;
+
+ diag->max_age = (5 * 1000); /* default value: 5 s */
+
+ /* setup header for port_data */
+ hdr = &diag->port_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->port_data.data;
+ hdr->buffer_size = sizeof(diag->port_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ /* setup header for config_data */
+ hdr = &diag->config_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->config_data.data;
+ hdr->buffer_size = sizeof(diag->config_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ adapter->diagnostics = diag;
+ return 0;
+}
+
+/**
+ * zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
+ * @adapter: the adapter whose diagnostic structures should be freed.
+ *
+ * Frees all data-structures in the given adapter that store diagnostics
+ * information. Can safely be called with partially set-up diagnostics.
+ */
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
+{
+ kfree(adapter->diagnostics);
+ adapter->diagnostics = NULL;
+}
+
+/**
+ * zfcp_diag_sysfs_setup() - Set up the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter to which the group should be added.
+ *
+ * Return: 0 on success; something else otherwise (see sysfs_create_group()).
+ */
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
+{
+ int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+ if (rc == 0)
+ adapter->diagnostics->sysfs_established = 1;
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter from which the group should be removed.
+ */
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
+{
+ if (adapter->diagnostics == NULL ||
+ !adapter->diagnostics->sysfs_established)
+ return;
+
+ /*
+ * We need this state-handling so we can prevent warnings being printed
+ * on the kernel-console in case we have to abort a halfway done
+ * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
+ * established. sysfs_remove_group() does this checking as well, but
+ * still prints a warning in case we try to remove a group that has not
+ * been established before
+ */
+ adapter->diagnostics->sysfs_established = 0;
+ sysfs_remove_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+}
+
+
+/**
+ * zfcp_diag_update_xdata() - Update a diagnostics buffer.
+ * @hdr: the meta data to update.
+ * @data: data to use for the update.
+ * @incomplete: flag stating whether the data in @data is incomplete.
+ */
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete)
+{
+ const unsigned long capture_timestamp = jiffies;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ /* make sure we never go into the past with an update */
+ if (!time_after_eq(capture_timestamp, hdr->timestamp))
+ goto out;
+
+ hdr->timestamp = capture_timestamp;
+ hdr->incomplete = incomplete;
+ memcpy(hdr->buffer, data, hdr->buffer_size);
+out:
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+}
+
+/**
+ * zfcp_diag_update_port_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Port Data.
+ * @adapter: Adapter to collect Port Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new diagnostics and updated the buffer;
+ * this also includes cases where data was retrieved but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_port_data_sync() for possible error codes
+ * (excluding -EAGAIN)
+ */
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_update_config_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Config Data.
+ * @adapter: Adapter to collect Config Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new diagnostics and updated the buffer;
+ * this also includes cases where data was retrieved but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_config_data_sync() for possible error codes
+ * (excluding -EAGAIN)
+ */
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */
+
+ return rc;
+}
+
+static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update,
+ unsigned long *const flags)
+ __must_hold(hdr->access_lock)
+{
+ int rc;
+
+ if (hdr->updating == 1) {
+ rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
+ hdr->updating == 0,
+ hdr->access_lock);
+ rc = (rc == 0 ? -EAGAIN : -EINTR);
+ } else {
+ hdr->updating = 1;
+ spin_unlock_irqrestore(&hdr->access_lock, *flags);
+
+ /* unlocked, because update function sleeps */
+ rc = buffer_update(adapter);
+
+ spin_lock_irqsave(&hdr->access_lock, *flags);
+ hdr->updating = 0;
+
+ /*
+ * every thread waiting here went via an interruptible wait,
+ * so it's fine to wake only those
+ */
+ wake_up_interruptible_all(&__zfcp_diag_publish_wait);
+ }
+
+ return rc;
+}
+
+static bool
+__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
+ const struct zfcp_diag_header *const hdr)
+ __must_hold(hdr->access_lock)
+{
+ const unsigned long now = jiffies;
+
+ /*
+ * Should not happen (data is from the future); if it does, still
+ * signal that it needs a refresh
+ */
+ if (!time_after_eq(now, hdr->timestamp))
+ return false;
+
+ if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
+ return false;
+
+ return true;
+}
+
+/**
+ * zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
+ * diagnostics buffer rate limited.
+ * @adapter: Adapter to collect the diagnostics from.
+ * @hdr: buffer-header for which to update with the collected diagnostics.
+ * @buffer_update: Specific implementation for collecting and updating.
+ *
+ * This function will cause an update of the given @hdr by calling the
+ * given @buffer_update function. If called by multiple sources at the same
+ * time, it will synchronize the update by only allowing one source to call
+ * @buffer_update and the others to wait for that source to complete instead
+ * (the wait is interruptible).
+ *
+ * Additionally, this version is rate-limited and will only exit if either the
+ * buffer is fresh enough (within the limit; it does nothing if the buffer is
+ * fresh enough to begin with), or if the source/thread that started this
+ * update is the one that made the update (to prevent endless loops).
+ *
+ * Return:
+ * * 0 - If the update was successfully published and/or the buffer is
+ * fresh enough
+ * * -EINTR - If the thread went into the wait-state and was interrupted
+ * * whatever @buffer_update returns
+ */
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ for (rc = 0;
+ !__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
+ rc = 0) {
+ rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
+ &flags);
+ if (rc != -EAGAIN)
+ break;
+ }
+
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+
+ return rc;
+}
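As a usage illustration, here is a hedged sketch of a consumer of this
rate-limited cache, mirroring the pattern the sysfs attributes later in this
series follow; the wrapper function itself is hypothetical, while the
zfcp_diag_* names come from this patch:

    static int example_read_port_diag(struct zfcp_adapter *adapter)
    {
            struct zfcp_diag_header *hdr =
                    &adapter->diagnostics->port_data.header;
            unsigned long flags;
            int rc;

            /* refresh the cached QTCB bottom only if older than max_age */
            rc = zfcp_diag_update_buffer_limited(
                    adapter, hdr, zfcp_diag_update_port_data_buffer);
            if (rc != 0)
                    return rc;      /* -EINTR, or an FSF exchange error */

            spin_lock_irqsave(&hdr->access_lock, flags);
            /*
             * Consume adapter->diagnostics->port_data.data under the lock;
             * hdr->incomplete tells whether the snapshot is only partial.
             */
            spin_unlock_irqrestore(&hdr->access_lock, flags);
            return 0;
    }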
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
new file mode 100644
index 000000000000..b9c93d15f67c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Definitions for handling diagnostics in the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef ZFCP_DIAG_H
+#define ZFCP_DIAG_H
+
+#include <linux/spinlock.h>
+
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+/**
+ * struct zfcp_diag_header - general part of a diagnostic buffer.
+ * @access_lock: lock protecting all the data in this buffer.
+ * @updating: flag showing that an update for this buffer is currently running.
+ * @incomplete: flag showing that the data in @buffer is incomplete.
+ * @timestamp: time in jiffies when the data of this buffer was last captured.
+ * @buffer: implementation-dependent data of this buffer
+ * @buffer_size: size of @buffer
+ */
+struct zfcp_diag_header {
+ spinlock_t access_lock;
+
+ /* Flags */
+ u64 updating :1;
+ u64 incomplete :1;
+
+ unsigned long timestamp;
+
+ void *buffer;
+ size_t buffer_size;
+};
+
+/**
+ * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
+ * adapter.
+ * @sysfs_established: flag showing that the associated sysfs-group was created
+ * during run of zfcp_adapter_enqueue().
+ * @max_age: maximum age of data in diagnostic buffers before they need to be
+ * refreshed (in ms).
+ * @port_data: data retrieved using exchange port data.
+ * @port_data.header: header with metadata for the cache in @port_data.data.
+ * @port_data.data: cached QTCB Bottom of command exchange port data.
+ * @config_data: data retrieved using exchange config data.
+ * @config_data.header: header with metadata for the cache in @config_data.data.
+ * @config_data.data: cached QTCB Bottom of command exchange config data.
+ */
+struct zfcp_diag_adapter {
+ u64 sysfs_established :1;
+
+ unsigned long max_age;
+
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_port data;
+ } port_data;
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_config data;
+ } config_data;
+};
+
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
+
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete);
+
+/*
+ * Function-Type used in zfcp_diag_update_buffer_limited() for the function
+ * that does the buffer-implementation dependent work.
+ */
+typedef int (*zfcp_diag_update_buffer_func)(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update);
+
+/**
+ * zfcp_diag_support_sfp() - Return %true if the @adapter supports reporting
+ * SFP Data.
+ * @adapter: adapter to test the availability of SFP Data reporting for.
+ */
+static inline bool
+zfcp_diag_support_sfp(const struct zfcp_adapter *const adapter)
+{
+ return !!(adapter->adapter_features & FSF_FEATURE_REPORT_SFP_DATA);
+}
+
+#endif /* ZFCP_DIAG_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 96f0d34e9459..93655b85b73f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -174,7 +174,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
- p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
@@ -190,7 +190,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
a_status = atomic_read(&adapter->status);
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
- a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (p_status & ZFCP_STATUS_COMMON_NOESC)
return need;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 31e8a7240fd7..c8556787cfdc 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+extern const struct attribute_group zfcp_sysfs_diag_attr_group;
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index cf63916814cc..223a805f0b0b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
@@ -19,6 +20,7 @@
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
@@ -554,6 +556,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_diag_header *const diag_hdr =
+ &adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
struct Scsi_Host *shost = adapter->scsi_host;
@@ -570,6 +574,12 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update until the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -585,6 +595,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
fc_host_node_name(shost) = 0;
fc_host_port_name(shost) = 0;
fc_host_port_id(shost) = 0;
@@ -653,16 +666,28 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
+ struct zfcp_diag_header *const diag_hdr =
+ &req->adapter->diagnostics->port_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
+ struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update until the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
zfcp_fsf_exchange_port_evaluate(req);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
@@ -1261,7 +1286,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req_id = req->req_id;
@@ -1278,6 +1304,19 @@ out:
return retval;
}
+
+/**
+ * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Config Data was successful, @data is complete
+ * * -EIO - Exchange Config Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
@@ -1301,7 +1340,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
if (data)
req->data = data;
@@ -1309,9 +1349,16 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
+
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
@@ -1369,10 +1416,17 @@ out:
}
/**
- * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @qdio: pointer to struct zfcp_qdio
- * @data: pointer to struct fsf_qtcb_bottom_port
- * Returns: 0 on success, error otherwise
+ * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Port Data was successful, @data is complete
+ * * -EIO - Exchange Port Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ * * -EOPNOTSUPP - This operation is not supported
*/
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
@@ -1408,10 +1462,15 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
-
return retval;
out_unlock:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 2c658b66318c..2b1e4da1944f 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -163,6 +163,8 @@
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
+#define FSF_FEATURE_REQUEST_SFP_DATA 0x00000200
+#define FSF_FEATURE_REPORT_SFP_DATA 0x00000800
#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
@@ -407,7 +409,24 @@ struct fsf_qtcb_bottom_port {
u8 cp_util;
u8 cb_util;
u8 a_util;
- u8 res2[253];
+ u8 res2;
+ u16 temperature;
+ u16 vcc;
+ u16 tx_bias;
+ u16 tx_power;
+ u16 rx_power;
+ union {
+ u16 raw;
+ struct {
+ u16 fec_active :1;
+ u16:7;
+ u16 connector_type :2;
+ u16 sfp_invalid :1;
+ u16 optical_port :1;
+ u16 port_tx_type :4;
+ };
+ } sfp_flags;
+ u8 res3[240];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
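For illustration, a hedged sketch of decoding the new sfp_flags word from a
cached bottom-port QTCB; the struct and bitfield names are the ones defined
above, while the helper itself is hypothetical:

    static void example_show_sfp_flags(const struct fsf_qtcb_bottom_port *b)
    {
            if (b->sfp_flags.sfp_invalid)
                    return; /* the adapter could not read valid SFP data */

            pr_info("optical=%u connector_type=%u fec_active=%u port_tx_type=%u\n",
                    b->sfp_flags.optical_port, b->sfp_flags.connector_type,
                    b->sfp_flags.fec_active, b->sfp_flags.port_tx_type);
    }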
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e9ded2befa0d..3910d529c15a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -605,7 +605,7 @@ zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret) {
+ if (ret != 0 && ret != -EAGAIN) {
kfree(data);
return NULL;
}
@@ -634,7 +634,7 @@ static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret)
+ if (ret != 0 && ret != -EAGAIN)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index af197e2b3e69..494b9fe9cc94 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
+#include "zfcp_diag.h"
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -325,6 +326,50 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ /* ceil(log(2^64 - 1) / log(10)) = 20 */
+ rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ unsigned long max_age;
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ rc = kstrtoul(buf, 10, &max_age);
+ if (rc != 0)
+ goto out;
+
+ adapter->diagnostics->max_age = max_age;
+
+ rc = count;
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
+ zfcp_sysfs_adapter_diag_max_age_show,
+ zfcp_sysfs_adapter_diag_max_age_store);
+
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
@@ -337,6 +382,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
+ &dev_attr_adapter_diag_max_age.attr,
NULL
};
@@ -577,7 +623,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
@@ -603,7 +649,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
*stat_inf = qtcb_config->stat_info;
kfree(qtcb_config);
@@ -664,3 +710,123 @@ struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_queue_full,
NULL
};
+
+static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
+ struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ struct zfcp_diag_header *diag_hdr;
+ struct fc_els_flogi *nsp;
+ ssize_t rc = -ENOLINK;
+ unsigned long flags;
+ unsigned int status;
+
+ if (!adapter)
+ return -ENODEV;
+
+ status = atomic_read(&adapter->status);
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))
+ goto out;
+
+ diag_hdr = &adapter->diagnostics->config_data.header;
+
+ rc = zfcp_diag_update_buffer_limited(
+ adapter, diag_hdr, zfcp_diag_update_config_data_buffer);
+ if (rc != 0)
+ goto out;
+
+ spin_lock_irqsave(&diag_hdr->access_lock, flags);
+ /* nport_serv_param doesn't contain the ELS_Command code */
+ nsp = (struct fc_els_flogi *)((unsigned long)
+ adapter->diagnostics->config_data
+ .data.nport_serv_param -
+ sizeof(u32));
+
+ rc = scnprintf(buf, 5 + 2, "%hu\n",
+ be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
+
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
+ zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
+
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \
+ static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+ { \
+ struct zfcp_adapter *const adapter = \
+ zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); \
+ struct zfcp_diag_header *diag_hdr; \
+ ssize_t rc = -ENOLINK; \
+ unsigned long flags; \
+ unsigned int status; \
+ \
+ if (!adapter) \
+ return -ENODEV; \
+ \
+ status = atomic_read(&adapter->status); \
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || \
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || \
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED)) \
+ goto out; \
+ \
+ if (!zfcp_diag_support_sfp(adapter)) { \
+ rc = -EOPNOTSUPP; \
+ goto out; \
+ } \
+ \
+ diag_hdr = &adapter->diagnostics->port_data.header; \
+ \
+ rc = zfcp_diag_update_buffer_limited( \
+ adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \
+ if (rc != 0) \
+ goto out; \
+ \
+ spin_lock_irqsave(&diag_hdr->access_lock, flags); \
+ rc = scnprintf( \
+ buf, (_prtsize) + 2, _prtfmt "\n", \
+ adapter->diagnostics->port_data.data._qtcb_member); \
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \
+ \
+ out: \
+ zfcp_ccw_adapter_put(adapter); \
+ return rc; \
+ } \
+ static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
+ zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
+
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+
+static struct attribute *zfcp_sysfs_diag_attrs[] = {
+ &dev_attr_adapter_diag_sfp_temperature.attr,
+ &dev_attr_adapter_diag_sfp_vcc.attr,
+ &dev_attr_adapter_diag_sfp_tx_bias.attr,
+ &dev_attr_adapter_diag_sfp_tx_power.attr,
+ &dev_attr_adapter_diag_sfp_rx_power.attr,
+ &dev_attr_adapter_diag_sfp_port_tx_type.attr,
+ &dev_attr_adapter_diag_sfp_optical_port.attr,
+ &dev_attr_adapter_diag_sfp_sfp_invalid.attr,
+ &dev_attr_adapter_diag_sfp_connector_type.attr,
+ &dev_attr_adapter_diag_sfp_fec_active.attr,
+ &dev_attr_adapter_diag_b2b_credit.attr,
+ NULL,
+};
+
+const struct attribute_group zfcp_sysfs_diag_attr_group = {
+ .name = "diagnostics",
+ .attrs = zfcp_sysfs_diag_attrs,
+};
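Seen from userspace, the writable diag_max_age attribute sits next to the
existing adapter attributes in the CCW device directory, and the read-only
values live in the new "diagnostics" group. A hedged sketch; the bus ID
0.0.1900 is a placeholder and the path layout is inferred from the group and
attribute names above:

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            FILE *f;

            /* raise the diagnostics cache age limit to 10 s (value in ms) */
            f = fopen("/sys/bus/ccw/devices/0.0.1900/diag_max_age", "w");
            if (f) {
                    fprintf(f, "10000\n");
                    fclose(f);
            }

            /* read one of the SFP values published by this series */
            f = fopen("/sys/bus/ccw/devices/0.0.1900/diagnostics/temperature",
                      "r");
            if (f) {
                    if (fgets(buf, sizeof(buf), f))
                            printf("SFP temperature: %s", buf);
                    fclose(f);
            }
            return 0;
    }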
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 536426f25e86..f2f7e6e76c07 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif
+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -172,6 +175,19 @@ static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
}
}
+static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
+{
+ int resid = cmd->SCp.this_residual;
+ struct scatterlist *s = cmd->SCp.buffer;
+
+ if (s)
+ while (!sg_is_last(s)) {
+ s = sg_next(s);
+ resid += s->length;
+ }
+ scsi_set_resid(cmd, resid);
+}
+
/**
* NCR5380_poll_politely2 - wait for two chip register values
* @hostdata: host private data
@@ -954,7 +970,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
- cmd->cmnd[0] != REQUEST_SENSE;
+ cmd->cmnd[0] != REQUEST_SENSE &&
+ (disconnect_mask & BIT(scmd_id(cmd)));
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -1379,7 +1396,7 @@ static void do_reset(struct Scsi_Host *instance)
* MESSAGE OUT phase and sending an ABORT message.
* @instance: relevant scsi host instance
*
- * Returns 0 on success, -1 on failure.
+ * Returns 0 on success, negative error code on failure.
*/
static int do_abort(struct Scsi_Host *instance)
@@ -1404,7 +1421,7 @@ static int do_abort(struct Scsi_Host *instance)
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
@@ -1415,7 +1432,7 @@ static int do_abort(struct Scsi_Host *instance)
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
}
@@ -1424,17 +1441,17 @@ static int do_abort(struct Scsi_Host *instance)
len = 1;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+ if (len)
+ rc = -ENXIO;
/*
* If we got here, and the command completed successfully,
* we're about to go into bus free state.
*/
- return len ? -1 : 0;
-
-timeout:
+out:
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- return -1;
+ return rc;
}
/*
@@ -1803,6 +1820,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result |= cmd->SCp.Status;
cmd->result |= cmd->SCp.Message << 8;
+ set_resid_from_SCp(cmd);
+
if (cmd->cmnd[0] == REQUEST_SENSE)
complete_cmd(instance, cmd);
else {
@@ -2264,7 +2283,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
hostdata->dma_len = 0;
- if (do_abort(instance)) {
+ if (do_abort(instance) < 0) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
result = FAILED;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 0ed3f806ace5..e36608ce937a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1477,6 +1477,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
struct aac_srb * srbcmd;
u32 flag;
u32 timeout;
+ struct aac_dev *dev = fib->dev;
aac_fib_init(fib);
switch(cmd->sc_data_direction){
@@ -1503,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->flags = cpu_to_le32(flag);
timeout = cmd->request->timeout/HZ;
if (timeout == 0)
- timeout = 1;
+ timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
srbcmd->retry_limit = 0; /* Obsolete parameter */
srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
@@ -2467,13 +2468,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
@@ -2559,13 +2560,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3fa03230f6ba..e3e4ecbea726 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -85,7 +85,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50877
+# define AAC_DRIVER_BUILD 50983
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -108,6 +108,8 @@ enum {
#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define AAC_SA_TIMEOUT 180
+#define AAC_ARC_TIMEOUT 60
#define get_bus_number(x) (x/AAC_MAX_TARGETS)
#define get_target_number(x) (x%AAC_MAX_TARGETS)
@@ -1328,7 +1330,7 @@ struct fib {
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
+#define AAC_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
@@ -1601,6 +1603,7 @@ struct aac_dev
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
struct delayed_work safw_rescan_work;
+ struct delayed_work src_reinit_aif_worker;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1673,6 +1676,7 @@ struct aac_dev
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
+ u8 soft_reset_support;
};
#define aac_adapter_interrupt(dev) \
@@ -2644,7 +2648,12 @@ int aac_scan_host(struct aac_dev *dev);
static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
{
- schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_RESCAN_DELAY);
+}
+
+static inline void aac_schedule_src_reinit_aif_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->src_reinit_aif_worker, AAC_RESCAN_DELAY);
}
static inline void aac_safw_rescan_worker(struct work_struct *work)
@@ -2658,10 +2667,10 @@ static inline void aac_safw_rescan_worker(struct work_struct *work)
aac_scan_host(dev);
}
-static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
{
- if (dev->sa_firmware)
- cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
}
/* SCp.phase values */
@@ -2671,6 +2680,7 @@ static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
#define AAC_OWNER_FIRMWARE 0x106
void aac_safw_rescan_worker(struct work_struct *work);
+void aac_src_reinit_aif_worker(struct work_struct *work);
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
int aac_setup_safw_adapter(struct aac_dev *dev);
@@ -2728,6 +2738,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index);
static inline int aac_is_src(struct aac_dev *dev)
{
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d4fcfa1e54e0..f75878d773cf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -571,6 +571,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
else
dev->sa_firmware = 0;
+ if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET))
+ dev->soft_reset_support = 1;
+ else
+ dev->soft_reset_support = 0;
+
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
aac_adapter_ioremap(dev, 0);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 2142a649e865..5a8a999606ea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -232,6 +232,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
fibptr->callback_data = NULL;
fibptr->callback = NULL;
+ fibptr->flags = 0;
return fibptr;
}
@@ -1463,6 +1464,14 @@ retry_next:
}
}
+static void aac_schedule_bus_scan(struct aac_dev *aac)
+{
+ if (aac->sa_firmware)
+ aac_schedule_safw_scan_worker(aac);
+ else
+ aac_schedule_src_reinit_aif_worker(aac);
+}
+
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
@@ -1638,7 +1647,7 @@ out:
*/
if (!retval && !is_kdump_kernel()) {
dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
- aac_schedule_safw_scan_worker(aac);
+ aac_schedule_bus_scan(aac);
}
if (jafo) {
@@ -1959,6 +1968,16 @@ int aac_scan_host(struct aac_dev *dev)
return rcode;
}
+void aac_src_reinit_aif_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, src_reinit_aif_worker);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+ aac_reinit_aif(dev, dev->cardtype);
+}
+
/**
* aac_handle_sa_aif Handle a message from the firmware
* @dev: Which adapter this fib is from
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4a858789e6c5..ee6bc2f9b80a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -391,6 +391,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
int chn, tid;
unsigned int depth = 0;
unsigned int set_timeout = 0;
+ int timeout = 0;
bool set_qd_dev_type = false;
u8 devtype = 0;
@@ -483,10 +484,13 @@ common_config:
/*
* Firmware has an individual device recovery time typically
- * of 35 seconds, give us a margin.
+ * of 35 seconds, give us a margin. Thor devices can take longer in
+ * error recovery, hence different value.
*/
- if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
- blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+ if (set_timeout) {
+ timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+ }
if (depth > 256)
depth = 256;
@@ -608,9 +612,13 @@ static struct device_attribute *aac_dev_attrs[] = {
static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
+ int retval;
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ retval = aac_adapter_check_health(dev);
+ if (retval)
+ return -EBUSY;
return aac_do_ioctl(dev, cmd, arg);
}
@@ -1585,6 +1593,19 @@ static void aac_init_char(void)
}
}
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
+{
+ /*
+ * Firmware may send AIF messages very early and the driver may have
+ * ignored them as it was not fully ready to process the messages. Send
+ * an AIF to the firmware so that any unprocessed events can be
+ * processed now.
+ */
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
+
+}
+
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned index = id->driver_data;
@@ -1682,6 +1703,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&aac->scan_mutex);
INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
+ INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
+ aac_src_reinit_aif_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1872,7 +1895,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1931,7 +1954,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1989,7 +2012,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b66e06726c8..787ec9baebb0 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -733,10 +733,20 @@ static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
return ctrl_up;
}
+static void aac_src_drop_io(struct aac_dev *dev)
+{
+ if (!dev->soft_reset_support)
+ return;
+
+ aac_adapter_sync_cmd(dev, DROP_IO,
+ 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
NULL, NULL, NULL, NULL);
+ aac_src_drop_io(dev);
}
static void aac_send_iop_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 88053b15c363..db687ef8a99e 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1400,7 +1400,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
, pCCB->acb
, pCCB->startdone
, atomic_read(&acb->ccboutstandingcount));
- return;
+ return;
}
arcmsr_report_ccb_state(acb, pCCB, error);
}
@@ -3476,8 +3476,8 @@ polling_hbc_ccb_retry:
, pCCB->pcmd->device->id
, (u32)pCCB->pcmd->device->lun
, pCCB);
- pCCB->pcmd->result = DID_ABORT << 16;
- arcmsr_ccb_complete(pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
continue;
}
printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index d12dd89538df..ddb52e7ba622 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1067,7 +1067,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
* Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct
* Params : host - host to finish
* Notes : This is called when a command is:
- * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONECT
+ * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT
* : This must not return until all transfers are completed.
*/
static
@@ -1816,7 +1816,7 @@ int acornscsi_reconnect(AS_Host *host)
}
/*
- * Function: int acornscsi_reconect_finish(AS_Host *host)
+ * Function: int acornscsi_reconnect_finish(AS_Host *host)
* Purpose : finish reconnecting a command
* Params : host - host to complete
* Returns : 0 if failed
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index e809493d0d06..a82b63a66635 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e41f0bbdc9fd..c6a752309dda 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1680,7 +1680,7 @@ static struct scsi_host_template atp870u_template = {
.bios_param = atp870u_biosparam /* biosparm */,
.can_queue = qcnt /* can_queue */,
.this_id = 7 /* SCSI ID */,
- .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
+ .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
.max_sectors = ATP870U_MAX_SECTORS,
};
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2f9213b257a4..eb0c76338295 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1487,8 +1487,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return ret;
}
-int
-restart_bfa(struct bfad_s *bfad)
+static int restart_bfa(struct bfad_s *bfad)
{
unsigned long flags;
struct pci_dev *pdev = bfad->pcidev;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 29ab81df75c0..fbfce02e5b93 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -275,8 +275,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- if (rc != BFA_STATUS_OK)
+ if (rc != BFA_STATUS_OK) {
+ kfree(fcstats);
return NULL;
+ }
wait_for_completion(&fcomp.comp);
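The hunk above plugs a leak: fcstats is allocated earlier in bfad_im_get_stats() and was abandoned on the error return. A minimal sketch of the allocate-then-free-on-error shape, with hypothetical example_* names:

#include <linux/slab.h>
#include <scsi/scsi_transport_fc.h>

static int example_query_hw(struct fc_host_statistics *stats)
{
	return 0;	/* stand-in: pretend the hardware query succeeded */
}

static struct fc_host_statistics *example_get_stats(void)
{
	struct fc_host_statistics *stats;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	if (example_query_hw(stats)) {
		kfree(stats);	/* free on the error path, don't leak */
		return NULL;
	}
	return stats;
}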
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index e4469df9c469..698f5ebaa0c2 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -813,7 +813,7 @@ struct fcoe_confqe {
/*
- * FCoE conection data base
+ * FCoE connection data base
*/
struct fcoe_conn_db {
#if defined(__BIG_ENDIAN)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 401743e2b429..4c8122a82322 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1242,7 +1242,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
time_left = wait_for_completion_timeout(&io_req->abts_done,
- (2 * rp->r_a_tov + 1) * HZ);
+ msecs_to_jiffies(2 * rp->r_a_tov + 1));
if (time_left)
BNX2FC_IO_DBG(io_req,
"Timed out in eh_abort waiting for abts_done");
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c5fa5f3b00e9..0b28d44d3573 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
- pci_dev_put(hba->pcidev);
if (hba->regview) {
pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
+ pci_dev_put(hba->pcidev);
bnx2i_free_mp_bdt(hba);
bnx2i_release_free_cid_que(hba);
iscsi_host_free(shost);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index e51923886475..950f9cdf0577 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -793,10 +793,10 @@ csio_hw_get_flash_params(struct csio_hw *hw)
goto found;
}
- /* Decode Flash part size. The code below looks repetative with
+ /* Decode Flash part size. The code below looks repetitive with
* common encodings, but that's not guaranteed in the JEDEC
- * specification for the Read JADEC ID command. The only thing that
- * we're guaranteed by the JADEC specification is where the
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
* Manufacturer ID is in the returned result. After that each
* Manufacturer ~could~ encode things completely differently.
* Note, all Flash parts must have 64KB sectors.
@@ -983,8 +983,8 @@ retry:
waiting -= 50;
/*
- * If neither Error nor Initialialized are indicated
- * by the firmware keep waiting till we exaust our
+ * If neither Error nor Initialized are indicated
+ * by the firmware keep waiting till we exhaust our
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
@@ -1738,7 +1738,7 @@ static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
* Convert Common Code Forward Error Control settings into the
* Firmware's API. If the current Requested FEC has "Automatic"
* (IEEE 802.3) specified, then we use whatever the Firmware
- * sent us as part of it's IEEE 802.3-based interpratation of
+ * sent us as part of its IEEE 802.3-based interpretation of
* the Transceiver Module EPROM FEC parameters. Otherwise we
* use whatever is in the current Requested FEC settings.
*/
@@ -2834,7 +2834,7 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
}
/*
- * csio_hws_initializing - Initialiazing state
+ * csio_hws_initializing - Initializing state
* @hw - HW module
* @evt - Event
*
@@ -3049,7 +3049,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
if (!csio_is_hw_master(hw))
break;
/*
- * The BYE should have alerady been issued, so we cant
+ * The BYE should have already been issued, so we can't
* use the mailbox interface. Hence we use the PL_RST
* register directly.
*/
@@ -3104,7 +3104,7 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
*
* A table driven interrupt handler that applies a set of masks to an
* interrupt status word and performs the corresponding actions if the
- * interrupts described by the mask have occured. The actions include
+ * interrupts described by the mask have occurred. The actions include
* optionally emitting a warning or alert message. The table is terminated
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
@@ -4219,7 +4219,7 @@ csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
* @hw: Pointer to HW module.
*
* It is assumed that the initialization is a synchronous operation.
- * So when we return afer posting the event, the HW SM should be in
+ * So when we return after posting the event, the HW SM should be in
* the ready state, if there were no errors during init.
*/
int
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index a6dd704d7f2d..2e8a3ac575cb 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -154,13 +154,10 @@ csio_dfs_create(struct csio_hw *hw)
/*
* csio_dfs_destroy - Destroys per-hw debugfs.
*/
-static int
+static void
csio_dfs_destroy(struct csio_hw *hw)
{
- if (hw->debugfs_root)
- debugfs_remove_recursive(hw->debugfs_root);
-
- return 0;
+ debugfs_remove_recursive(hw->debugfs_root);
}
/*
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 66e58f0a75dc..74ff8adc41f7 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/**
@@ -1989,7 +1992,7 @@ static int
csio_ln_init(struct csio_lnode *ln)
{
int rv = -EINVAL;
- struct csio_lnode *rln, *pln;
+ struct csio_lnode *pln;
struct csio_hw *hw = csio_lnode_to_hw(ln);
csio_init_state(&ln->sm, csio_lns_uninit);
@@ -2019,7 +2022,6 @@ csio_ln_init(struct csio_lnode *ln)
* The rest is common for non-root physical and NPIV lnodes.
* Just get references to all other modules
*/
- rln = csio_root_lnode(ln);
if (csio_is_npiv_ln(ln)) {
/* NPIV */
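The three FDMI completion callbacks above move from spin_lock_irq() to spin_lock_irqsave() because they may be invoked with interrupts already disabled, and an unconditional spin_unlock_irq() would re-enable them behind the caller's back. A minimal sketch of the save/restore discipline:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Safe from any context: preserves the caller's interrupt state. */
static void example_locked_op(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}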
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 6f13673d6aa0..94810b19e747 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1210,7 +1210,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
!csio_is_hw_intr_enabled(hw)) {
csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
*((uint8_t *)mbp->mb));
- goto error_out;
+ goto error_out;
}
if (mbm->mcurrent != NULL) {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index da50e87921bc..bc1086ae6835 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -2073,7 +2073,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct net_device *ndev = cdev->ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int i, err;
if (!lldi->vr->iscsi.size) {
@@ -2082,7 +2081,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
}
cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3e17af8aedeb..0d044c165960 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2284,34 +2284,6 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
- cxgbi_sock_put(csk);
-
- return len;
-}
-
-static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- if (csk->csk_family == AF_INET)
- len = sprintf(buf, "%pI4",
- &csk->daddr.sin_addr.s_addr);
- else
- len = sprintf(buf, "%pI6",
- &csk->daddr6.sin6_addr);
-
- cxgbi_sock_put(csk);
-
- return len;
-}
-
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
char *buf)
{
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 2dbf35f82787..fbd2ae40dab4 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -44,14 +44,12 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb *ioarcb;
struct sisl_ioasa *ioasa;
u32 resid;
if (unlikely(!cmd))
return;
- ioarcb = &(cmd->rcb);
ioasa = &(cmd->sa);
if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 7bd376d95ed5..b02ac389e6c6 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram))) {
esas2r_hdebug("NVRAM read failed, using defaults");
+ up(&a->nvram_semaphore);
return false;
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 80608b53897b..8ef150dfb6f7 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1024,7 +1024,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
atomic64_inc(&fnic_stats->io_stats.io_completions);
- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+ io_duration_time = jiffies_to_msecs(jiffies) -
+ jiffies_to_msecs(start_time);
if(io_duration_time <= 10)
atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 78af9cc2009b..1f55b9e4e74a 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -259,7 +259,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
u32 status;
- int dev_cmd_err[] = {
+ static const int dev_cmd_err[] = {
/* convert from fw's version of error.h to host's version */
0, /* ERR_SUCCESS */
EINVAL, /* ERR_EINVAL */
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 720c4d6be939..233c73e01246 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/timer.h>
#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
@@ -84,6 +85,7 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
+#define CLEAR_ITCT_TIMEOUT 20
struct hisi_hba;
@@ -167,6 +169,7 @@ struct hisi_sas_phy {
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
int enable;
+ atomic_t down_cnt;
};
struct hisi_sas_port {
@@ -296,8 +299,8 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*clear_itct)(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *dev);
+ int (*clear_itct)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *dev);
void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
@@ -321,6 +324,44 @@ struct hisi_sas_hw {
const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
+#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+
+struct hisi_sas_debugfs_cq {
+ struct hisi_sas_cq *cq;
+ void *complete_hdr;
+};
+
+struct hisi_sas_debugfs_dq {
+ struct hisi_sas_dq *dq;
+ struct hisi_sas_cmd_hdr *hdr;
+};
+
+struct hisi_sas_debugfs_regs {
+ struct hisi_hba *hisi_hba;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_port {
+ struct hisi_sas_phy *phy;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_iost {
+ struct hisi_sas_iost *iost;
+};
+
+struct hisi_sas_debugfs_itct {
+ struct hisi_sas_itct *itct;
+};
+
+struct hisi_sas_debugfs_iost_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
+struct hisi_sas_debugfs_itct_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
struct hisi_hba {
/* This must be the first element, used by SHOST_TO_SAS_HA */
struct sas_ha_struct *p;
@@ -402,19 +443,20 @@ struct hisi_hba {
/* debugfs memories */
/* Put Global AXI and RAS Register into register array */
- u32 *debugfs_regs[DEBUGFS_REGS_NUM];
- u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
- void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_iost *debugfs_iost;
- struct hisi_sas_itct *debugfs_itct;
- u64 *debugfs_iost_cache;
- u64 *debugfs_itct_cache;
-
+ struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM];
+ struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS];
+ struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+
+ u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP];
+ int debugfs_dump_index;
struct dentry *debugfs_dir;
struct dentry *debugfs_dump_dentry;
struct dentry *debugfs_bist_dentry;
- bool debugfs_snapshot;
};
/* Generic HW DMA host memory structures */
@@ -556,6 +598,7 @@ struct hisi_sas_slot_dif_buf_table {
extern struct scsi_transport_template *hisi_sas_stt;
extern bool hisi_sas_debugfs_enable;
+extern u32 hisi_sas_debugfs_dump_count;
extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 0847e682797b..03588ec3c394 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (in_softirq())
+ /*
+ * For IOs from the upper layer, preemption may already be
+ * disabled in the IO path; if down() then sleeps with
+ * preemption disabled, schedule() will report a scheduling
+ * bug, so check preemptible() before calling down().
+ */
+ if (!preemptible())
return -EINVAL;
down(&hisi_hba->sem);
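A minimal illustration of the guard added above, with hypothetical example_* names: down() may sleep, and sleeping with preemption disabled is a scheduling bug, so the submission path bails out instead of blocking when it is not preemptible.

#include <linux/preempt.h>
#include <linux/semaphore.h>
#include <linux/errno.h>

static DEFINE_SEMAPHORE(example_sem);

static int example_enter_slow_path(void)
{
	/*
	 * down() may sleep; if the caller already disabled preemption
	 * (as block-layer submission paths can), refuse rather than
	 * trigger a scheduling-while-atomic warning.
	 */
	if (!preemptible())
		return -EINVAL;

	down(&example_sem);
	/* ... slow-path work under the semaphore ... */
	up(&example_sem);
	return 0;
}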
@@ -968,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ struct hisi_sas_port *port;
unsigned long flags;
if (!sas_port)
return;
+ port = to_hisi_sas_port(sas_port);
spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
@@ -1045,6 +1052,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
+ int ret = 0;
dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
@@ -1056,13 +1064,16 @@ static void hisi_sas_dev_gone(struct domain_device *device)
hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
device->lldd_dev = NULL;
}
if (hisi_hba->hw->free_device)
hisi_hba->hw->free_device(sas_dev);
- sas_dev->dev_type = SAS_PHY_UNUSED;
+
+ /* Don't mark it as SAS_PHY_UNUSED if clearing the ITCT failed */
+ if (!ret)
+ sas_dev->dev_type = SAS_PHY_UNUSED;
sas_dev->sas_device = NULL;
up(&hisi_hba->sem);
}
@@ -1402,7 +1413,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- bool do_port_check = !!(_sas_port != sas_port);
+ bool do_port_check = _sas_port != sas_port;
if (!sas_phy->phy->enabled)
continue;
@@ -1563,7 +1574,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
int rc;
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!hisi_hba->hw->soft_reset)
@@ -2055,7 +2066,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
/* Internal abort timed out */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -2676,6 +2687,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
return rc;
@@ -2687,10 +2699,11 @@ struct dentry *hisi_sas_debugfs_dir;
static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++)
- memcpy(hisi_hba->debugfs_complete_hdr[i],
+ memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
hisi_hba->complete_hdr[i],
HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}
@@ -2698,13 +2711,14 @@ static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
int j;
- debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
+ debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
cmd_hdr = hisi_hba->cmd_hdr[i];
for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
@@ -2715,6 +2729,7 @@ static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
const struct hisi_sas_debugfs_reg *port =
hisi_hba->hw->debugfs_reg_port;
int i, phy_cnt;
@@ -2722,7 +2737,7 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
u32 *databuf;
for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
- databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
+ databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
for (i = 0; i < port->count; i++, databuf++) {
offset = port->base_off + 4 * i;
*databuf = port->read_port_reg(hisi_hba, phy_cnt,
@@ -2733,7 +2748,8 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *global =
hw->debugfs_reg_array[DEBUGFS_GLOBAL];
@@ -2745,7 +2761,8 @@ static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *axi =
hw->debugfs_reg_array[DEBUGFS_AXI];
@@ -2758,7 +2775,8 @@ static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *ras =
hw->debugfs_reg_array[DEBUGFS_RAS];
@@ -2771,8 +2789,9 @@ static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
- void *cachebuf = hisi_hba->debugfs_itct_cache;
- void *databuf = hisi_hba->debugfs_itct;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
struct hisi_sas_itct *itct;
int i;
@@ -2789,9 +2808,10 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
int max_command_entries = HISI_SAS_MAX_COMMANDS;
- void *cachebuf = hisi_hba->debugfs_iost_cache;
- void *databuf = hisi_hba->debugfs_iost;
+ void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
struct hisi_sas_iost *iost;
int i;
@@ -2842,11 +2862,12 @@ static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *global = s->private;
+ struct hisi_hba *hisi_hba = global->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
+ hisi_sas_debugfs_print_reg(global->data,
reg_global, s);
return 0;
@@ -2868,11 +2889,12 @@ static const struct file_operations hisi_sas_debugfs_global_fops = {
static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *axi = s->private;
+ struct hisi_hba *hisi_hba = axi->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
+ hisi_sas_debugfs_print_reg(axi->data,
reg_axi, s);
return 0;
@@ -2894,11 +2916,12 @@ static const struct file_operations hisi_sas_debugfs_axi_fops = {
static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *ras = s->private;
+ struct hisi_hba *hisi_hba = ras->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
+ hisi_sas_debugfs_print_reg(ras->data,
reg_ras, s);
return 0;
@@ -2920,13 +2943,13 @@ static const struct file_operations hisi_sas_debugfs_ras_fops = {
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
- struct hisi_sas_phy *phy = s->private;
+ struct hisi_sas_debugfs_port *port = s->private;
+ struct hisi_sas_phy *phy = port->phy;
struct hisi_hba *hisi_hba = phy->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
- u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];
- hisi_sas_debugfs_print_reg(databuf, reg_port, s);
+ hisi_sas_debugfs_print_reg(port->data, reg_port, s);
return 0;
}
@@ -2975,13 +2998,13 @@ static void hisi_sas_show_row_32(struct seq_file *s, int index,
seq_puts(s, "\n");
}
-static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
+ struct hisi_sas_debugfs_cq *debugfs_cq)
{
- struct hisi_sas_cq *cq = cq_ptr;
+ struct hisi_sas_cq *cq = debugfs_cq->cq;
struct hisi_hba *hisi_hba = cq->hisi_hba;
- void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
- __le32 *complete_hdr = complete_queue +
- (hisi_hba->hw->complete_hdr_size * slot);
+ __le32 *complete_hdr = debugfs_cq->complete_hdr +
+ (hisi_hba->hw->complete_hdr_size * slot);
hisi_sas_show_row_32(s, slot,
hisi_hba->hw->complete_hdr_size,
@@ -2990,11 +3013,11 @@ static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
- struct hisi_sas_cq *cq = s->private;
+ struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
int slot;
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_cq_show_slot(s, slot, cq);
+ hisi_sas_cq_show_slot(s, slot, debugfs_cq);
}
return 0;
}
@@ -3014,9 +3037,8 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = {
static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
- struct hisi_sas_dq *dq = dq_ptr;
- struct hisi_hba *hisi_hba = dq->hisi_hba;
- void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
+ struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
+ void *cmd_queue = debugfs_dq->hdr;
__le32 *cmd_hdr = cmd_queue +
sizeof(struct hisi_sas_cmd_hdr) * slot;
@@ -3048,14 +3070,14 @@ static const struct file_operations hisi_sas_debugfs_dq_fops = {
static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
+ struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
+ struct hisi_sas_iost *iost = debugfs_iost->iost;
int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
- for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
- __le64 *iost = &debugfs_iost->qw0;
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ __le64 *data = &iost->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
+ hisi_sas_show_row_64(s, i, sizeof(*iost), data);
}
return 0;
@@ -3076,9 +3098,8 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = {
static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *iost_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
+ struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
+ struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *iost;
@@ -3117,13 +3138,13 @@ static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
int i;
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
+ struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ struct hisi_sas_itct *itct = debugfs_itct->itct;
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
- __le64 *itct = &debugfs_itct->qw0;
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ __le64 *data = &itct->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
+ hisi_sas_show_row_64(s, i, sizeof(*itct), data);
}
return 0;
@@ -3144,9 +3165,8 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = {
static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *itct_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
+ struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
+ struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *itct;
@@ -3184,6 +3204,8 @@ static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
+ u64 *debugfs_timestamp;
+ int dump_index = hisi_hba->debugfs_dump_index;
struct dentry *dump_dentry;
struct dentry *dentry;
char name[256];
@@ -3191,19 +3213,26 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
int c;
int d;
- /* Create dump dir inside device dir */
- dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
- hisi_hba->debugfs_dump_dentry = dump_dentry;
+ snprintf(name, 256, "%d", dump_index);
+
+ dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
- debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
- &hisi_sas_debugfs_global_fops);
+ debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+
+ debugfs_create_u64("timestamp", 0400, dump_dentry,
+ debugfs_timestamp);
+
+ debugfs_create_file("global", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+ &hisi_sas_debugfs_global_fops);
/* Create port dir and files */
dentry = debugfs_create_dir("port", dump_dentry);
for (p = 0; p < hisi_hba->n_phy; p++) {
snprintf(name, 256, "%d", p);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_port_reg[dump_index][p],
&hisi_sas_debugfs_port_fops);
}
@@ -3212,7 +3241,8 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (c = 0; c < hisi_hba->queue_count; c++) {
snprintf(name, 256, "%d", c);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_cq[dump_index][c],
&hisi_sas_debugfs_cq_fops);
}
@@ -3221,26 +3251,33 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (d = 0; d < hisi_hba->queue_count; d++) {
snprintf(name, 256, "%d", d);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_dq[dump_index][d],
&hisi_sas_debugfs_dq_fops);
}
- debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost[dump_index],
&hisi_sas_debugfs_iost_fops);
- debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost_cache[dump_index],
&hisi_sas_debugfs_iost_cache_fops);
- debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct[dump_index],
&hisi_sas_debugfs_itct_fops);
- debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct_cache[dump_index],
&hisi_sas_debugfs_itct_cache_fops);
- debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("axi", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
&hisi_sas_debugfs_axi_fops);
- debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("ras", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
&hisi_sas_debugfs_ras_fops);
return;
@@ -3271,8 +3308,7 @@ static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
struct hisi_hba *hisi_hba = file->f_inode->i_private;
char buf[8];
- /* A bit racy, but don't care too much since it's only debugfs */
- if (hisi_hba->debugfs_snapshot)
+ if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
return -EFAULT;
if (count > 8)
@@ -3539,7 +3575,7 @@ static const struct {
int value;
char *name;
} hisi_sas_debugfs_loop_modes[] = {
- { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digial" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
};
@@ -3670,132 +3706,201 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
.owner = THIS_MODULE,
};
+static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct hisi_sas_phy *phy = s->private;
+ unsigned int set_val;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, &set_val);
+ if (res)
+ return res;
+
+ if (set_val > 0)
+ return -EINVAL;
+
+ atomic_set(&phy->down_cnt, 0);
+
+ return count;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
+ .open = hisi_sas_debugfs_phy_down_cnt_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_phy_down_cnt_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, debugfs_work);
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
- if (hisi_hba->debugfs_snapshot)
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
return;
- hisi_hba->debugfs_snapshot = true;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
hisi_sas_debugfs_snapshot_regs(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
-static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
{
struct device *dev = hisi_hba->dev;
int i;
- devm_kfree(dev, hisi_hba->debugfs_iost_cache);
- devm_kfree(dev, hisi_hba->debugfs_itct_cache);
- devm_kfree(dev, hisi_hba->debugfs_iost);
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+ devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+ devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+ devm_kfree(dev,
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
for (i = 0; i < DEBUGFS_REGS_NUM; i++)
- devm_kfree(dev, hisi_hba->debugfs_regs[i]);
+ devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
for (i = 0; i < hisi_hba->n_phy; i++)
- devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
}
-static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
{
const struct hisi_sas_hw *hw = hisi_hba->hw;
struct device *dev = hisi_hba->dev;
- int p, c, d;
+ int p, c, d, r, i;
size_t sz;
- hisi_hba->debugfs_dump_dentry =
- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+ for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+ struct hisi_sas_debugfs_regs *regs =
+ &hisi_hba->debugfs_regs[dump_index][r];
- sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
- goto fail;
+ sz = hw->debugfs_reg_array[r]->count * 4;
+ regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!regs->data)
+ goto fail;
+ regs->hisi_hba = hisi_hba;
+ }
sz = hw->debugfs_reg_port->count * 4;
for (p = 0; p < hisi_hba->n_phy; p++) {
- hisi_hba->debugfs_port_reg[p] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_port *port =
+ &hisi_hba->debugfs_port_reg[dump_index][p];
- if (!hisi_hba->debugfs_port_reg[p])
+ port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!port->data)
goto fail;
+ port->phy = &hisi_hba->phy[p];
}
- sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_AXI] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
- goto fail;
-
- sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_RAS] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
- goto fail;
-
sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
for (c = 0; c < hisi_hba->queue_count; c++) {
- hisi_hba->debugfs_complete_hdr[c] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_cq *cq =
+ &hisi_hba->debugfs_cq[dump_index][c];
- if (!hisi_hba->debugfs_complete_hdr[c])
+ cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!cq->complete_hdr)
goto fail;
+ cq->cq = &hisi_hba->cq[c];
}
sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
for (d = 0; d < hisi_hba->queue_count; d++) {
- hisi_hba->debugfs_cmd_hdr[d] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_dq *dq =
+ &hisi_hba->debugfs_dq[dump_index][d];
- if (!hisi_hba->debugfs_cmd_hdr[d])
+ dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!dq->hdr)
goto fail;
+ dq->dq = &hisi_hba->dq[d];
}
sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
- hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost)
+ hisi_hba->debugfs_iost[dump_index].iost =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost[dump_index].iost)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost_cache)
+ hisi_hba->debugfs_iost_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct_cache)
+ hisi_hba->debugfs_itct_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
goto fail;
/* New memory allocation must be located before itct */
sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct)
+ hisi_hba->debugfs_itct[dump_index].itct =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct[dump_index].itct)
goto fail;
return 0;
fail:
- hisi_sas_debugfs_release(hisi_hba);
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+ hisi_sas_debugfs_release(hisi_hba, i);
return -ENOMEM;
}
+static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
+{
+ struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+ hisi_hba->debugfs_dir);
+ char name[16];
+ int phy_no;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ snprintf(name, 16, "%d", phy_no);
+ debugfs_create_file(name, 0600, dir,
+ &hisi_hba->phy[phy_no],
+ &hisi_sas_debugfs_phy_down_cnt_ops);
+ }
+}
+
static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
hisi_hba->debugfs_bist_dentry =
@@ -3827,6 +3932,7 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ int i;
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir);
@@ -3838,9 +3944,17 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
/* create bist structures */
hisi_sas_debugfs_bist_init(hisi_hba);
- if (hisi_sas_debugfs_alloc(hisi_hba)) {
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
- dev_dbg(dev, "failed to init debugfs!\n");
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+ hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
}
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
@@ -3874,14 +3988,24 @@ EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
+u32 hisi_sas_debugfs_dump_count = 1;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
+module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
+MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
+
static __init int hisi_sas_init(void)
{
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
if (!hisi_sas_stt)
return -ENOMEM;
- if (hisi_sas_debugfs_enable)
+ if (hisi_sas_debugfs_enable) {
hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
+ if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
+ pr_info("hisi_sas: Limiting debugfs dump count\n");
+ hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
+ }
+ }
return 0;
}
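debugfs_dump_count above is a read-only module parameter clamped at init time. A minimal sketch of that clamp-at-load pattern, with hypothetical example_* names (EXAMPLE_MAX_DUMPS mirrors HISI_SAS_MAX_DEBUGFS_DUMP):

#include <linux/module.h>
#include <linux/moduleparam.h>

#define EXAMPLE_MAX_DUMPS	50	/* mirrors HISI_SAS_MAX_DEBUGFS_DUMP */

static u32 example_dump_count = 1;
module_param_named(dump_count, example_dump_count, uint, 0444);
MODULE_PARM_DESC(dump_count, "Number of dumps to allow");

static int __init example_init(void)
{
	/* Clamp rather than fail, keeping the module loadable. */
	if (example_dump_count > EXAMPLE_MAX_DUMPS) {
		pr_info("example: limiting dump count to %d\n",
			EXAMPLE_MAX_DUMPS);
		example_dump_count = EXAMPLE_MAX_DUMPS;
	}
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");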
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index b861a0f14c9d..3af53cc42bd6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -531,8 +531,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
@@ -551,6 +551,8 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
qw0 = le64_to_cpu(itct->qw0);
qw0 &= ~ITCT_HDR_VALID_MSK;
itct->qw0 = cpu_to_le64(qw0);
+
+ return 0;
}
static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 8e96a257e439..61b1e2693b08 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -974,13 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
int i;
sas_dev->completion = &completion;
@@ -990,13 +991,19 @@ static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
+ /* need to set register twice to clear ITCT for v2 hw */
for (i = 0; i < 2; i++) {
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
memset(itct, 0, sizeof(struct hisi_sas_itct));
}
+ return 0;
}
static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
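clear_itct now fails with -ETIMEDOUT instead of blocking forever if the hardware never acknowledges. A minimal sketch of the wait_for_completion_timeout() pattern, assuming the same 20-second budget as CLEAR_ITCT_TIMEOUT:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define EXAMPLE_TIMEOUT	20	/* seconds, mirroring CLEAR_ITCT_TIMEOUT */

static int example_wait_hw_ack(struct completion *done)
{
	/* Returns 0 on timeout, remaining jiffies on completion. */
	if (!wait_for_completion_timeout(done, EXAMPLE_TIMEOUT * HZ))
		return -ETIMEDOUT;

	return 0;
}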
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index cb8d087762db..bf5d5f138437 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -795,13 +795,14 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
sas_dev->completion = &completion;
@@ -814,8 +815,14 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
+
memset(itct, 0, sizeof(struct hisi_sas_itct));
+ return 0;
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -1542,6 +1549,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct device *dev = hisi_hba->dev;
+ atomic_inc(&phy->down_cnt);
+
del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
@@ -3022,11 +3031,6 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CTRL, reg_val);
- mdelay(100);
- reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
-
/* set the bist init value */
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CODE,
@@ -3035,6 +3039,11 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
SAS_PHY_BIST_CODE1,
SAS_PHY_BIST_CODE1_INIT);
+ mdelay(100);
+ reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CTRL, reg_val);
+
/* clear error bit */
mdelay(100);
hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
@@ -3259,6 +3268,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
err_out_regions:
pci_release_regions(pdev);
@@ -3292,8 +3302,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- hisi_sas_debugfs_exit(hisi_hba);
-
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
@@ -3305,6 +3313,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
}
@@ -3422,6 +3431,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
if (rc) {
scsi_remove_host(shost);
pci_disable_device(pdev);
+ return rc;
}
hisi_hba->hw->phys_init(hisi_hba);
sas_resume_ha(sha);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 55522b7162d3..1d669e47b692 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -38,6 +38,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -554,13 +555,29 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
+static bool scsi_host_check_in_flight(struct request *rq, void *data,
+ bool reserved)
+{
+ int *count = data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+ (*count)++;
+
+ return true;
+}
+
/**
* scsi_host_busy - Return the host busy counter
* @shost: Pointer to Scsi_Host to check.
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
- return atomic_read(&shost->host_busy);
+ int cnt = 0;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_host_check_in_flight, &cnt);
+ return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);
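scsi_host_busy() now derives its answer from the blk-mq tag set instead of a shared host_busy atomic, removing that counter from the hot path. A minimal sketch of counting requests with blk_mq_tagset_busy_iter(), with a simplified hypothetical predicate:

#include <linux/blk-mq.h>

/* Invoked once per started request; return true to keep iterating. */
static bool example_count_busy(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;	/* the real code also tests SCMD_STATE_INFLIGHT */
	return true;
}

static int example_busy(struct blk_mq_tag_set *set)
{
	int cnt = 0;

	blk_mq_tagset_busy_iter(set, example_count_busy, &cnt);
	return cnt;
}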
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e8bc8d328bab..f25672982c5f 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -498,7 +498,7 @@ ips_setup(char *ips_str)
int i;
char *key;
char *value;
- IPS_OPTION options[] = {
+ static const IPS_OPTION options[] = {
{"noi2o", &ips_force_i2o, 0},
{"nommap", &ips_force_memio, 0},
{"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 9e8de1462593..b1c197505579 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -147,7 +147,7 @@ static struct isci_port *sci_port_configuration_agent_find_port(
/**
*
* @controller: This is the controller object that contains the port agent
- * @port_agent: This is the port configruation agent for the controller.
+ * @port_agent: This is the port configuration agent for the controller.
*
* This routine will validate the port configuration is correct for the SCU
* hardware. The SCU hardware allows for port configurations as follows. LP0
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 49aa4e657c44..cd1e4b4d95bb 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1504,7 +1504,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
* @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port conected to this device.
+ * @port: This parameter specifies the isci_port connected to this device.
*
* pointer to new isci_remote_device.
*/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7bedbe877704..0bc63a7ab41c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;
+ if (!tcp_sw_conn->sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Transport not bound to socket!\n");
+ return -EINVAL;
+ }
+
noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 691acbdcc46d..935f98804198 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -605,6 +605,12 @@ struct lpfc_epd_pool {
spinlock_t lock; /* lock for expedite pool */
};
+enum ras_state {
+ INACTIVE,
+ REG_INPROGRESS,
+ ACTIVE
+};
+
struct lpfc_ras_fwlog {
uint8_t *fwlog_buff;
uint32_t fw_buffcount; /* Buffer size posted to FW */
@@ -621,7 +627,7 @@ struct lpfc_ras_fwlog {
bool ras_enabled; /* Ras Enabled for the function */
#define LPFC_RAS_DISABLE_LOGGING 0x00
#define LPFC_RAS_ENABLE_LOGGING 0x01
- bool ras_active; /* RAS logging running state */
+ enum ras_state state; /* RAS logging running state */
};
struct lpfc_hba {
@@ -725,6 +731,7 @@ struct lpfc_hba {
#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
@@ -830,6 +837,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
+ uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -872,7 +880,6 @@ struct lpfc_hba {
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
- uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
uint32_t cfg_delay_discovery;
@@ -990,7 +997,6 @@ struct lpfc_hba {
struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
- struct dma_pool *txrdy_payload_pool;
struct dma_pool *lpfc_cmd_rsp_buf_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1055,6 +1061,7 @@ struct lpfc_hba {
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
+ struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1209,6 +1216,13 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+
+ struct hlist_node cpuhp; /* used for cpuhp per hba callback */
+ struct timer_list cpuhp_poll_timer;
+ struct list_head poll_list; /* slowpath eq polling list */
+#define LPFC_POLL_HB 1 /* slowpath heartbeat */
+#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
+#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
};
static inline struct Scsi_Host *
@@ -1299,6 +1313,26 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
+ * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
+ * @numa_mask: Pointer to phba's numa_mask member.
+ * @start: starting cpu index
+ *
+ * Note: If no valid cpu is found, then nr_cpu_ids is returned.
+ *
+ **/
+static inline unsigned int
+lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+{
+ unsigned int cpu_it;
+
+ for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ if (cpu_online(cpu_it))
+ break;
+ }
+
+ return cpu_it;
+}
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
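
The new inline helper above only scans; callers decide what a miss means. A hedged round-robin usage sketch (the wrapper is illustrative; only lpfc_next_online_numa_cpu() and sli4_hba.numa_mask come from this series):

	static unsigned int demo_next_eq_cpu(struct lpfc_hba *phba,
					     unsigned int prev)
	{
		/* for_each_cpu_wrap() wraps past the end of the mask, so
		 * starting at prev + 1 round-robins across the NUMA node.
		 */
		unsigned int start = (prev + 1) % nr_cpu_ids;
		unsigned int cpu =
			lpfc_next_online_numa_cpu(&phba->sli4_hba.numa_mask,
						  start);

		/* nr_cpu_ids means no online CPU on the node; fall back. */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
		return cpu;
	}
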
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 25aa7a53d255..4ff82b36a37a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
int i;
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
- unsigned long iflags = 0;
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
"XRI Dist lpfc%d Total %d IO %d ELS %d\n",
phba->brd_no,
@@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
phba->sli4_hba.io_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
+
+ spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock_irqsave(&vport->phba->hbalock, iflags);
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
/* Tab in to show lport ownership. */
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
@@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
- rcu_read_unlock();
+ spin_unlock_irq(shost->host_lock);
if (!lport)
goto buffer_done;
@@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);
- /* RCU is already unlocked. */
+ /* host_lock is already unlocked. */
goto buffer_done;
- rcu_unlock_buf_done:
- rcu_read_unlock();
+ unlock_buf_done:
+ spin_unlock_irq(shost->host_lock);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
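
The conversion in this function settles on a single locking shape: the outer shost->host_lock (irq-disabling) is held across the whole fc_nodes walk, the hbalock is nested briefly inside it, and every buffer-overflow exit funnels through one unlock label. Pattern only, not patch code:

	spin_lock_irq(shost->host_lock);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		/* Nested lock: no irq-flag juggling needed inside. */
		spin_lock(&vport->phba->hbalock);
		rport = lpfc_ndlp_get_nrport(ndlp);
		spin_unlock(&vport->phba->hbalock);
		/* format into buf; on overflow, goto unlock_buf_done */
	}
	spin_unlock_irq(shost->host_lock);
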
@@ -1475,8 +1475,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
int i;
msleep(100);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ return -EIO;
/* verify if privileged for the request operation */
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
@@ -1486,8 +1487,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/* wait for the SLI port firmware ready after firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ continue;
if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
continue;
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
@@ -1642,7 +1644,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
LPFC_MBOXQ_t *mbox = NULL;
unsigned long val = 0;
- char *pval = 0;
+ char *pval = NULL;
int rc = 0;
if (!strncmp("enable", buff_out,
@@ -3533,6 +3535,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2,
LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
LPFC_DELAY_INIT_LINK_INDEFINITELY,
"Suppress Link Up at initialization");
+
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, 0444,
+ lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, 0444,
+ lpfc_pt_show, NULL);
+
/*
# lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
# 1 - (1024)
@@ -3580,9 +3607,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
- "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -4096,7 +4120,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ /*
+ * The 'topology' is not a configurable parameter if:
+ * - persistent topology enabled
+ * - G7 adapters
+ * - G6 with no private loop support
+ */
+
+ if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -5298,7 +5331,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %02d not present\n",
phba->sli4_hba.curr_disp_cpu);
- else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+ else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5311,10 +5344,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "CPU %02d EQ %04d hdwq %04d "
+ "CPU %02d EQ None hdwq %04d "
"physid %d coreid %d ht %d ua %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->eq, cpup->hdwq, cpup->phys_id,
+ cpup->hdwq, cpup->phys_id,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
@@ -5329,7 +5362,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5340,7 +5373,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
}
phba->sli4_hba.curr_disp_cpu++;
@@ -5711,7 +5744,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
* the driver will advertise it supports to the SCSI layer.
*
* 0 = Set nr_hw_queues by the number of CPUs or HW queues.
- * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ * 1,256 = Manually specify nr_hw_queue value to be advertised,
*
* Value range is [0,256]. Default value is 8.
*/
@@ -5729,30 +5762,130 @@ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
* A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
*
* 0 = Configure the number of hdw queues to the number of active CPUs.
- * 1,128 = Manually specify how many hdw queues to use.
+ * 1,256 = Manually specify how many hdw queues to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
*/
LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
+static inline void
+lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+{
+#if IS_ENABLED(CONFIG_X86)
+ /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ phba->cfg_irq_numa = 1;
+ else
+ phba->cfg_irq_numa = 0;
+#else
+ phba->cfg_irq_numa = 0;
+#endif
+}
+
/*
* lpfc_irq_chann: Set the number of IRQ vectors that are available
* for Hardware Queues to utilize. This also will map to the number
* of EQ / MSI-X vectors the driver will create. This should never be
* more than the number of Hardware Queues
*
- * 0 = Configure number of IRQ Channels to the number of active CPUs.
- * 1,128 = Manually specify how many IRQ Channels to use.
+ * 0 = Configure number of IRQ Channels to:
+ * if AMD architecture, number of CPUs on HBA's NUMA node
+ * otherwise, number of active CPUs.
+ * [1,256] = Manually specify how many IRQ Channels to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is [0].
*/
-LPFC_ATTR_R(irq_chann,
- LPFC_HBA_HDWQ_DEF,
- LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
- "Set the number of I/O IRQ Channels");
+static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
+module_param(lpfc_irq_chann, uint, 0444);
+MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
+
+/**
+ * lpfc_irq_chann_init - Set the hba irq_chann initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
+{
+ const struct cpumask *numa_mask;
+
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8532 use_msi = %u ignoring cfg_irq_numa\n",
+ phba->cfg_use_msi);
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return 0;
+ }
+
+ /* Check if default setting was passed */
+ if (val == LPFC_IRQ_CHANN_DEF)
+ lpfc_assign_default_irq_numa(phba);
+
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (cpumask_empty(numa_mask)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8533 Could not identify NUMA node, "
+ "ignoring cfg_irq_numa\n");
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ } else {
+ phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8543 lpfc_irq_chann set to %u "
+ "(numa)\n", phba->cfg_irq_chann);
+ }
+ } else {
+ if (val > LPFC_IRQ_CHANN_MAX) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8545 lpfc_irq_chann attribute cannot "
+ "be set to %u, allowed range is "
+ "[%u,%u]\n",
+ val,
+ LPFC_IRQ_CHANN_MIN,
+ LPFC_IRQ_CHANN_MAX);
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return -EINVAL;
+ }
+ phba->cfg_irq_chann = val;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_irq_chann_show - Display value of irq_chann
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
+}
+
+static DEVICE_ATTR_RO(lpfc_irq_chann);
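
A hedged walk-through of the defaulting logic above, using only values visible in this series: with MSI-X active (cfg_use_msi == 2) on an AMD host whose adapter sits on a NUMA node with 32 online CPUs, lpfc_irq_chann_init(phba, LPFC_IRQ_CHANN_DEF) sets cfg_irq_numa, finds the node mask non-empty, and leaves cfg_irq_chann = cpumask_weight(numa_mask) = 32. On a non-AMD host the same call keeps cfg_irq_numa at 0 and uses the passed value after range-checking it against LPFC_IRQ_CHANN_MAX. The 32-CPU figure is an assumed example, not a patch value.
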
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -5933,7 +6066,53 @@ LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
* [1-4] = Multiple of 1/4th Mb of host memory for FW logging
* Value range [0..4]. Default value is 0
*/
-LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+lpfc_param_show(ras_fwlog_buffsize);
+
+static ssize_t
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+{
+ int ret = 0;
+ enum ras_state state;
+
+ if (!lpfc_rangecheck(val, 0, 4))
+ return -EINVAL;
+
+ if (phba->cfg_ras_fwlog_buffsize == val)
+ return 0;
+
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+ state = phba->ras_fwlog.state;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (state == REG_INPROGRESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+ "registration is in progress\n");
+ return -EBUSY;
+ }
+
+ /* For disable logging: stop the logs and free the DMA.
+ * For ras_fwlog_buffsize size change we still need to free and
+ * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
+ */
+ phba->cfg_ras_fwlog_buffsize = val;
+ if (state == ACTIVE) {
+ lpfc_ras_stop_fwlog(phba);
+ lpfc_sli4_ras_dma_free(phba);
+ }
+
+ lpfc_sli4_ras_init(phba);
+ if (phba->ras_fwlog.ras_enabled)
+ ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ return ret;
+}
+
+lpfc_param_store(ras_fwlog_buffsize);
+static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
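
Here and in the bsg and debugfs hunks below, the new ras_state enum is always sampled under hbalock and acted on as a snapshot. A minimal sketch of that discipline (the helper name is illustrative, not in the patch):

	static enum ras_state demo_ras_state(struct lpfc_hba *phba)
	{
		enum ras_state state;

		/* Snapshot under hbalock; the state may change after the
		 * unlock, which is why writers re-check before acting.
		 */
		spin_lock_irq(&phba->hbalock);
		state = phba->ras_fwlog.state;
		spin_unlock_irq(&phba->hbalock);
		return state;
	}
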
/*
* lpfc_ras_fwlog_level: Firmware logging verbosity level
@@ -6071,8 +6250,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
- &dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
+ &dev_attr_pls,
+ &dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
@@ -7085,11 +7265,22 @@ struct fc_function_template lpfc_vport_transport_functions = {
static void
lpfc_get_hba_function_mode(struct lpfc_hba *phba)
{
- /* If it's a SkyHawk FCoE adapter */
- if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+ /* If the adapter supports FCoE mode */
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_SKYHAWK:
+ case PCI_DEVICE_ID_SKYHAWK_VF:
+ case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
+ case PCI_DEVICE_ID_ZEPHYR_DCSP:
+ case PCI_DEVICE_ID_HORNET:
+ case PCI_DEVICE_ID_TIGERSHARK:
+ case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
- else
+ break;
+ default:
+ /* for others, clear the flag */
phba->hba_flag &= ~HBA_FCOE_MODE;
+ }
}
/**
@@ -7099,6 +7290,7 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_ns_query_init(phba, lpfc_ns_query);
lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
@@ -7205,12 +7397,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
- lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
- lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
@@ -7256,11 +7446,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}
if (!phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
- if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 39a736b887b1..d4e1b120cc9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5435,10 +5435,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Current logging state */
- if (ras_fwlog->ras_active == true)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE)
ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
else
ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+ spin_unlock_irq(&phba->hbalock);
ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5495,10 +5497,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
if (action == LPFC_RASACTION_STOP_LOGGING) {
/* Check if already disabled */
- if (ras_fwlog->ras_active == false) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -ESRCH;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
/* Disable logging */
lpfc_ras_stop_fwlog(phba);
@@ -5509,8 +5514,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
* FW-logging with new log-level. Return status
* "Logging already Running" to caller.
**/
- if (ras_fwlog->ras_active)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != INACTIVE)
action_status = -EINPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5626,10 +5633,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
goto ras_job_error;
/* Logging to be stopped before reading */
- if (ras_fwlog->ras_active == true) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -EINPROGRESS;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b2ad8c750486..ee353c84a097 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
+int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
+void lpfc_sli4_poll_hbtimer(struct timer_list *t);
+void lpfc_sli4_start_polling(struct lpfc_queue *q);
+void lpfc_sli4_stop_polling(struct lpfc_queue *q);
+
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -586,6 +592,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
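
A hedged reading of the polling lifecycle these prototypes imply, inferred from the names plus the poll_list and cpuhp fields added in lpfc.h (the implementations land in lpfc_sli.c):

	/* lpfc_sli4_start_polling(q)        - put the EQ on phba->poll_list
	 * lpfc_sli4_poll_hbtimer(t)         - timer walks the list as a
	 *                                     slowpath heartbeat
	 * lpfc_sli4_poll_eq(q, path)        - service one EQ; path is
	 *                                     LPFC_POLL_FASTPATH or
	 *                                     LPFC_POLL_SLOWPATH
	 * lpfc_sli4_stop_polling(q)         - return to interrupt-driven mode
	 * lpfc_sli4_cleanup_poll_list(phba) - empty the list on teardown
	 */
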
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 25e86706e207..99c9bb249758 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -763,9 +763,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x x%x "
- "sz x%x\n",
+ "x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
@@ -961,9 +963,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4105 NameServer Rsp Data: x%x x%x\n",
+ "4105 NameServer Rsp Data: x%x x%x "
+ "x%x x%x sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -1025,6 +1031,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6450 GID_PT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -1159,6 +1170,11 @@ out:
/* Link up / RSCN discovery */
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6451 GFF_ID cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
if (vport->num_disc_nodes == 0) {
/*
* The driver has cycled through all Nports in the RSCN payload.
@@ -1868,6 +1884,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ /* Driver aborted this IO. No retry as error
+ * is likely Offline->Online or some adapter
+ * error. Recovery will try again.
+ */
+ break;
case IOERR_ABORT_IN_PROGRESS:
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_ILLEGAL_FRAME:
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d34be60d379..2e6a68d9ea4f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -2078,6 +2079,96 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
}
#endif
+static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
+ char *buffer, int size)
+{
+ int copied = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(dmabuf, next,
+ &phba->ras_fwlog.fwlog_buff_list, list) {
+ /* Stop before a copy would overrun the caller's buffer */
+ if (copied + LPFC_RAS_MAX_ENTRY_SIZE > size)
+ break;
+ memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
+ copied += LPFC_RAS_MAX_ENTRY_SIZE;
+ }
+ return copied;
+}
+
+static int
+lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer
+ * @inode: The inode pointer that contains a phba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the vport from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory log for this vport, and then
+ * returns a pointer to that log in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ rc = -EINVAL;
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize;
+ debug->buffer = vmalloc(size);
+ if (!debug->buffer)
+ goto free_debug;
+
+ debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size);
+ if (debug->len < 0) {
+ rc = -EINVAL;
+ goto free_buffer;
+ }
+ file->private_data = debug;
+
+ return 0;
+
+free_buffer:
+ vfree(debug->buffer);
+free_debug:
+ kfree(debug);
+out:
+ return rc;
+}
+
/**
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
@@ -5286,6 +5377,16 @@ static const struct file_operations lpfc_debugfs_op_lockstat = {
};
#endif
+#undef lpfc_debugfs_ras_log
+static const struct file_operations lpfc_debugfs_ras_log = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_ras_log_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_ras_log_release,
+};
+#endif
+
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
.owner = THIS_MODULE,
@@ -5457,7 +5558,6 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.release = lpfc_idiag_cmd_release,
};
-#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
* @phba: Pointer to HBA context object.
@@ -5707,6 +5807,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* RAS log */
+ snprintf(name, sizeof(name), "ras_log");
+ phba->debug_ras_log =
+ debugfs_create_file(name, 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_ras_log);
+ if (!phba->debug_ras_log) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6148 Cannot create debugfs"
+ " ras_log\n");
+ goto debug_failed;
+ }
+
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
@@ -6117,6 +6230,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+ debugfs_remove(phba->debug_ras_log);
+ phba->debug_ras_log = NULL;
+
#ifdef LPFC_HDWQ_LOCK_STAT
debugfs_remove(phba->debug_lockstat); /* lockstat */
phba->debug_lockstat = NULL;
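
For orientation, the new file's read path is the shared lpfc_debugfs_read() helper; a hedged re-sketch of what that amounts to for ras_log (illustrative, not the exact helper):

	static ssize_t demo_ras_log_read(struct file *file, char __user *buf,
					 size_t nbytes, loff_t *ppos)
	{
		struct lpfc_debug *debug = file->private_data;

		/* open() snapshotted the firmware log into a vmalloc'd
		 * buffer, so reads copy from a stable snapshot even while
		 * the live log keeps moving underneath.
		 */
		return simple_read_from_buffer(buf, nbytes, ppos,
					       debug->buffer, debug->len);
	}
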
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d5303994bfd6..42a2bf38eaea 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
+ char *mode;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* If we don't send GFT_ID to Fabric, a PRLI error
+ * could be expected.
+ */
+ if ((vport->fc_flag & FC_FABRIC) ||
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+ mode = KERN_ERR;
+ else
+ mode = KERN_INFO;
+
/* PRLI failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
@@ -4291,6 +4301,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "3177 ELS response failed\n");
+ goto out;
+ }
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -4430,7 +4445,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
@@ -5260,6 +5275,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
}
}
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6452 Discover PLOGI %d flag x%x\n",
+ sentplogi, vport->fc_flag);
+
if (sentplogi) {
lpfc_set_disctmo(vport);
}
@@ -6455,7 +6475,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt;
int rscn_id = 0, hba_id = 0;
- int i;
+ int i, tmo;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6561,6 +6581,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DEFERRED;
+
+ /* Restart disctmo if its already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies + msecs_to_jiffies(1000 * tmo));
+ }
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
@@ -6663,9 +6690,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* RSCN processed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
- vport->port_state);
+ vport->port_state, vport->num_disc_nodes,
+ vport->gidft_inp);
/* To process RSCN, first compare RSCN data with NameServer */
vport->fc_ns_retry = 0;
@@ -7986,20 +8014,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
+ unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
+
/*
* For SLI3, only the hbalock is required. But SLI4 needs to coordinate
* with the ring insert operation. Because lpfc_sli_issue_abort_iotag
* ultimately grabs the ring_lock, the driver must splice the list into
* a working list and release the locks before calling the abort.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring = lpfc_phba_elsring(phba);
/* Bail out if we've no ELS wq, like in PCI error recovery case. */
if (unlikely(!pring)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -8014,6 +8044,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->vport != vport)
continue;
+ if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
@@ -8037,21 +8070,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
@@ -8091,7 +8124,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
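
The irq-flag churn in this function follows one rule worth stating: spin_lock_irq()/spin_unlock_irq() unconditionally re-enable interrupts on unlock, which is unsafe if a caller may already hold them disabled, so the flush path switches to the save/restore pair. Pattern only, not patch code:

	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	/* splice waiting iocbs onto a private abort list */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* abort each entry without any lock held across the call */
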
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 749286acdc17..85ada3deb47d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba)
if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
- if (phba->link_state >= LPFC_LINK_UP ||
+ /* Driver could have abort request completed in queue
+ * when link goes down. Allow for this transition.
+ */
+ if (phba->link_state >= LPFC_LINK_DOWN ||
phba->link_flag & LS_MDS_LOOPBACK) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -1135,7 +1138,6 @@ void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- uint8_t bbscn = 0;
if (pmb->u.mb.mbxStatus)
goto out;
@@ -1162,17 +1164,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI) {
- if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
- bbscn = bf_get(lpfc_bbscn_def,
- &phba->sli4_hba.bbscn_params);
- vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
- vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
- }
+ if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
- } else if (vport->fc_flag & FC_PT2PT) {
+ else if (vport->fc_flag & FC_PT2PT)
lpfc_disc_start(vport);
- }
+
return;
out:
@@ -3456,8 +3452,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag);
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1313 Link Down UNEXP WWPN Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "1313 Link Down Unexpected FA WWPN Event x%x "
+ "received Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
@@ -4046,7 +4042,7 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -4575,8 +4571,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp;
free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_free_rpi(vport->phba, rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
return NULL;
}
@@ -4835,12 +4833,51 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
/*
+ * Sets the mailbox completion handler to be used for the
+ * unreg_rpi command. The handler varies based on the state of
+ * the port and what will be happening to the rpi next.
+ */
+static void
+lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+
+ } else if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (kref_read(&ndlp->kref) > 0)) {
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else {
+ if (vport->load_flag & FC_UNLOADING) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ lpfc_nlp_get(ndlp);
+ }
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+}
+
+/*
* Free rpi associated with LPFC_NODELIST entry.
* This routine is called from lpfc_freenode(), when we are removing
* a LPFC_NODELIST entry. It is also called if the driver initiates a
@@ -4860,7 +4897,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"3366 RPI x%x needs to be "
"unregistered nlp_flag x%x "
"did x%x\n",
@@ -4871,7 +4909,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* no need to queue up another one.
*/
if (ndlp->nlp_flag & NLP_UNREG_INP) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
"Data: x%px\n",
@@ -4890,39 +4929,19 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
- } else {
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- (!(vport->load_flag & FC_UNLOADING)) &&
- (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) >=
- LPFC_SLI_INTF_IF_TYPE_2) &&
- (kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->mbox_cmpl =
- lpfc_sli4_unreg_rpi_cmpl_clr;
- /*
- * accept PLOGIs after unreg_rpi_cmpl
- */
- acc_plogi = 0;
- } else if (vport->load_flag & FC_UNLOADING) {
- mbox->ctx_ndlp = NULL;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- } else {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- }
- }
+ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+ /*
+ * accept PLOGIs after unreg_rpi_cmpl
+ */
+ acc_plogi = 0;
if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
ndlp->nlp_flag |= NLP_UNREG_INP;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
"NPort x%x deferred flg x%x "
"Data:x%px\n",
@@ -5057,6 +5076,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5138,8 +5158,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cleanup_vports_rrqs(vport, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- lpfc_unreg_rpi(vport, ndlp);
-
+ if (!lpfc_unreg_rpi(vport, ndlp)) {
+ /* Clean up unregistered and non freed rpis */
+ if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
+ !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
+ lpfc_sli4_free_rpi(vport->phba,
+ ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ }
return 0;
}
@@ -5165,8 +5197,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
+ "ref %d map:x%x ndlp x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5203,8 +5237,9 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0940 removed node x%px DID x%x "
- " rport not null x%px\n",
- ndlp, ndlp->nlp_DID, ndlp->rport);
+ "rpi %d rport not null x%px\n",
+ ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
rdata->pnode = NULL;
@@ -5362,6 +5397,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (!ndlp)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6453 Setup New Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5375,6 +5417,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
"0014 Could not enable ndlp\n");
return NULL;
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6454 Setup Enabled Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5394,6 +5442,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6455 Setup RSCN Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* NVME Target mode waits until rport is known to be
* impacted by the RSCN before it transitions. No
* active management - just go to NPR provided the
@@ -5405,15 +5459,32 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6456 Skip Setup RSCN Node x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
+ }
} else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6457 Setup Active Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* If the initiator received a PLOGI from this NPort or if the
* initiator is already in the process of discovery on it,
* there's no need to try to discover it again.
@@ -5565,10 +5636,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0202 Start Discovery hba state x%x "
- "Data: x%x x%x x%x\n",
+ "0202 Start Discovery port state x%x "
+ "flg x%x Data: x%x x%x x%x\n",
vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ vport->fc_adisc_cnt, vport->fc_npr_cnt);
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5996,7 +6067,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -6185,12 +6256,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0007 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0007 Init New ndlp x%px, rpi:x%x DID:%x "
+ "flg:x%x refcnt:%d map:x%x\n",
+ ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map);
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6419,7 +6490,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
ret = 1;
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2624 RPI %x DID %x flag %x "
"still logged in\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bd533475c86a..25cdcbc2b02f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -210,7 +210,6 @@ struct lpfc_sli_intf {
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 0
-#define LPFC_IMAX_THRESHOLD 1000
#define LPFC_MAX_AUTO_EQ_DELAY 120
#define LPFC_EQ_DELAY_STEP 15
#define LPFC_EQD_ISR_TRIGGER 20000
@@ -2320,6 +2319,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
+#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -2809,6 +2809,15 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_trunk_SHIFT 12
#define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F
#define lpfc_mbx_rd_conf_trunk_WORD word2
+#define lpfc_mbx_rd_conf_pt_SHIFT 20
+#define lpfc_mbx_rd_conf_pt_MASK 0x00000003
+#define lpfc_mbx_rd_conf_pt_WORD word2
+#define lpfc_mbx_rd_conf_tf_SHIFT 22
+#define lpfc_mbx_rd_conf_tf_MASK 0x00000001
+#define lpfc_mbx_rd_conf_tf_WORD word2
+#define lpfc_mbx_rd_conf_ptv_SHIFT 23
+#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001
+#define lpfc_mbx_rd_conf_ptv_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
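
These SHIFT/MASK/WORD triples feed lpfc's bf_get()/bf_set() accessors. A simplified form, hedged against the real macros in lpfc_hw4.h:

	#define demo_bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* e.g. demo_bf_get(lpfc_mbx_rd_conf_ptv, rd_config) would extract
	 * the new persistent-topology-valid bit from word2.
	 */
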
@@ -3479,6 +3488,9 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
@@ -3518,6 +3530,7 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x11
+#define LPFC_SET_DUAL_DUMP 0x1e
struct lpfc_mbx_set_feature {
struct mbox_header header;
uint32_t feature;
@@ -3532,6 +3545,15 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+#define lpfc_mbx_set_feature_dd_SHIFT 0
+#define lpfc_mbx_set_feature_dd_MASK 0x00000001
+#define lpfc_mbx_set_feature_dd_WORD word6
+#define lpfc_mbx_set_feature_ddquery_SHIFT 1
+#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001
+#define lpfc_mbx_set_feature_ddquery_WORD word6
+#define LPFC_DISABLE_DUAL_DUMP 0
+#define LPFC_ENABLE_DUAL_DUMP 1
+#define LPFC_QUERY_OP_DUAL_DUMP 2
uint32_t word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
@@ -4261,6 +4283,8 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
+#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
};
/*
@@ -4659,6 +4683,7 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
+#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
@@ -4807,8 +4832,8 @@ union lpfc_wqe128 {
struct send_frame_wqe send_frame;
};
-#define MAGIC_NUMER_G6 0xFEAA0003
-#define MAGIC_NUMER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G6 0xFEAA0003
+#define MAGIC_NUMBER_G7 0xFEAA0005
struct lpfc_grp_hdr {
uint32_t size;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e8813d26e594..dc6f7c4b54c6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -40,6 +40,8 @@
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -66,9 +68,13 @@
#include "lpfc_version.h"
#include "lpfc_ids.h"
+static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -1235,10 +1241,9 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
struct lpfc_hba, eq_delay_work);
struct lpfc_eq_intr_info *eqi, *eqi_new;
struct lpfc_queue *eq, *eq_next;
- unsigned char *eqcnt = NULL;
+ unsigned char *ena_delay = NULL;
uint32_t usdelay;
int i;
- bool update = false;
if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
return;
@@ -1247,44 +1252,36 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
phba->pport->fc_flag & FC_OFFLINE_MODE)
goto requeue;
- eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
- GFP_KERNEL);
- if (!eqcnt)
+ ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+ GFP_KERNEL);
+ if (!ena_delay)
goto requeue;
- if (phba->cfg_irq_chann > 1) {
- /* Loop thru all IRQ vectors */
- for (i = 0; i < phba->cfg_irq_chann; i++) {
- /* Get the EQ corresponding to the IRQ vector */
- eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (!eq)
- continue;
- if (eq->q_mode) {
- update = true;
- break;
- }
- if (eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+ if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+ eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+ ena_delay[eq->last_cpu] = 1;
}
- } else
- update = true;
+ }
for_each_present_cpu(i) {
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
- if (!update && eqcnt[i] < 2) {
- eqi->icnt = 0;
- continue;
+ if (ena_delay[i]) {
+ usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+ } else {
+ usdelay = 0;
}
- usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
- LPFC_EQ_DELAY_STEP;
- if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
- usdelay = LPFC_MAX_AUTO_EQ_DELAY;
-
eqi->icnt = 0;
list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
- if (eq->last_cpu != i) {
+ if (unlikely(eq->last_cpu != i)) {
eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
eq->last_cpu);
list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1296,7 +1293,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
}
}
- kfree(eqcnt);
+ kfree(ena_delay);
requeue:
queue_delayed_work(phba->wq, &phba->eq_delay_work,
@@ -3053,11 +3050,12 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
continue;
}
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0009 rpi:%x DID:%x "
- "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0009 Assign RPI x%x to ndlp x%px "
+ "DID:x%06x flg:x%x map:x%x\n",
+ ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_usg_map);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3387,6 +3385,8 @@ lpfc_online(struct lpfc_hba *phba)
if (phba->cfg_xri_rebalancing)
lpfc_create_multixri_pools(phba);
+ lpfc_cpuhp_add(phba);
+
lpfc_unblock_mgmt_io(phba);
return 0;
}
@@ -3453,10 +3453,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if ((!NLP_CHK_NODE_ACT(ndlp)) ||
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* Driver must assume RPI is invalid for
+ * any unused or inactive node.
+ */
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
continue;
+ }
+
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
NULL, NLP_EVT_DEVICE_RECOVERY);
@@ -3472,16 +3477,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport,
- KERN_INFO, LOG_NODE,
- "0011 lpfc_offline: "
- "ndlp:x%px did %x "
- "usgmap:x%x rpi:%x\n",
- ndlp, ndlp->nlp_DID,
- ndlp->nlp_usg_map,
- ndlp->nlp_rpi);
-
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0011 Free RPI x%x on "
+ "ndlp:x%px did x%x "
+ "usgmap:x%x\n",
+ ndlp->nlp_rpi, ndlp,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -3545,6 +3550,7 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
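+ /* Unhook cpu hotplug callbacks and stop the EQ poll timer */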
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -5283,10 +5289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
+ "2901 Async SLI event - Type:%d, Event Data: x%08x "
+ "x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- evt_type);
+ acqe_sli->reserved, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -5433,11 +5439,26 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
+ /* Misconfigured WWN. Reports that the SLI Port is configured
+ * to use FA-WWN, but the attached device doesn't support it.
+ * No driver action is required.
+ * Event Data1 - N/A, Event Data2 - N/A
+ */
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
+ "2699 Misconfigured FA-WWN - Attached device does "
+ "not support FA-WWN\n");
+ break;
+ case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
+ /* EEPROM failure. No driver action is required */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2518 EEPROM failure - "
+ "Event Data1: x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3193 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
- acqe_sli->event_data1, acqe_sli->event_data2,
+ "3193 Unrecognized SLI event, type: 0x%x",
evt_type);
break;
}
@@ -5976,6 +5997,29 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
+ * lpfc_cpumask_of_node_init - initializes the cpumask of the phba's NUMA node
+ * @phba: Pointer to HBA context object.
+ *
+ **/
+static void
+lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, numa_node;
+ struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
+
+ cpumask_clear(numa_mask);
+
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE)
+ return;
+
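+ /* Collect every possible cpu that shares the adapter's NUMA node */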
+ for_each_possible_cpu(cpu)
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, numa_mask);
+}
+
+/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6418,6 +6462,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
phba->sli4_hba.num_possible_cpu = num_possible_cpus();
phba->sli4_hba.curr_disp_cpu = 0;
+ lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6953,6 +6998,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
+ cpumask_clear(&phba->sli4_hba.numa_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -7126,7 +7172,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __func__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, iocb_count);
goto out_free_iocbq;
}
@@ -7545,18 +7591,10 @@ lpfc_create_shost(struct lpfc_hba *phba)
if (phba->nvmet_support) {
/* Only 1 vport (pport) will support NVME target */
- if (phba->txrdy_payload_pool == NULL) {
- phba->txrdy_payload_pool = dma_pool_create(
- "txrdy_pool", &phba->pcidev->dev,
- TXRDY_PAYLOAD_LEN, 16, 0);
- if (phba->txrdy_payload_pool) {
- phba->targetport = NULL;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
- lpfc_printf_log(phba, KERN_INFO,
- LOG_INIT | LOG_NVME_DISC,
- "6076 NVME Target Found\n");
- }
- }
+ phba->targetport = NULL;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
+ "6076 NVME Target Found\n");
}
lpfc_debugfs_initialize(vport);
@@ -8235,6 +8273,94 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
+static const char * const lpfc_topo_to_str[] = {
+ "Loop then P2P",
+ "Loopback",
+ "P2P Only",
+ "Unsupported",
+ "Loop Only",
+ "Unsupported",
+ "P2P then Loop",
+};
+
+/**
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
+ * @phba: pointer to lpfc hba data structure.
+ * @rdconf: pointer to read config data
+ *
+ * This routine is invoked to map the topology values as read
+ * from the read config mailbox command. If the persistent
+ * topology feature is supported, the firmware will provide the
+ * saved topology information to be used in INIT_LINK
+ *
+ **/
+#define LINK_FLAGS_DEF 0x0
+#define LINK_FLAGS_P2P 0x1
+#define LINK_FLAGS_LOOP 0x2
+static void
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
+{
+ u8 ptv, tf, pt;
+
+ ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
+ tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
+ pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
+ ptv, tf, pt);
+ if (!ptv) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2019 FW does not support persistent topology "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ return;
+ }
+ /* FW supports persistent topology - override module parameter value */
+ phba->hba_flag |= HBA_PERSISTENT_TOPO;
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ if (tf || (pt == LINK_FLAGS_LOOP)) {
+ /* Invalid values from FW - use driver params */
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ } else {
+ /* Prism only supports PT2PT topology */
+ phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
+ }
+ break;
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ if (!tf) {
+ phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
+ ? FLAGS_TOPOLOGY_MODE_LOOP
+ : FLAGS_TOPOLOGY_MODE_PT_PT);
+ } else {
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ }
+ break;
+ default: /* G5 */
+ if (tf) {
+ /* If topology failover set - pt is '0' or '1' */
+ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
+ FLAGS_TOPOLOGY_MODE_LOOP_PT);
+ } else {
+ phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
+ ? FLAGS_TOPOLOGY_MODE_PT_PT
+ : FLAGS_TOPOLOGY_MODE_LOOP);
+ }
+ break;
+ }
+ if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2020 Using persistent topology value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2021 Invalid topology values from FW "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ }
+}
+
/**
* lpfc_sli4_read_config - Get the config parameters.
* @phba: pointer to lpfc hba data structure.
@@ -8346,6 +8472,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+ lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
"XRI(B:%d M:%d), "
@@ -8619,8 +8746,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
*/
if (phba->nvmet_support) {
- if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
}
@@ -9160,6 +9287,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_cleanup_poll_list(phba);
+
/* Release HBA eqs */
if (phba->sli4_hba.hdwq)
lpfc_sli4_release_hdwq(phba);
@@ -10581,7 +10710,6 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
*/
if ((match == LPFC_FIND_BY_EQ) &&
(cpup->flag & LPFC_CPU_FIRST_IRQ) &&
- (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
@@ -10619,6 +10747,75 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
}
#endif
+/**
+ * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
+ * @phba: pointer to lpfc hba data structure.
+ * @eqidx: index for eq and irq vector
+ * @flag: flags to set for vector_map structure
+ * @cpu: cpu used to index vector_map structure
+ *
+ * The routine assigns eq info into vector_map structure
+ */
+static inline void
+lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
+ unsigned int cpu)
+{
+ struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
+ struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
+
+ cpup->eq = eqidx;
+ cpup->flag |= flag;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
+ cpu, eqhdl->irq, cpup->eq, cpup->flag);
+}
+
+/**
+ * lpfc_cpu_map_array_init - Initialize cpu_map structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the cpu_map array structure
+ */
+static void
+lpfc_cpu_map_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_eq_intr_info *eqi;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
+ INIT_LIST_HEAD(&eqi->list);
+ eqi->icnt = 0;
+ }
+}
+
+/**
+ * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the hba_eq_hdl array structure
+ */
+static void
+lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_hba_eq_hdl *eqhdl;
+ int i;
+
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eqhdl = lpfc_get_eq_hdl(i);
+ eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->phba = phba;
+ }
+}
+
/**
* lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
@@ -10637,22 +10834,10 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *new_cpup;
- const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
- /* Init cpu_map array */
- for_each_possible_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
- cpup->eq = LPFC_VECTOR_MAP_EMPTY;
- cpup->irq = LPFC_VECTOR_MAP_EMPTY;
- cpup->flag = 0;
- }
-
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
max_core_id = 0;
@@ -10688,65 +10873,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
min_core_id = cpup->core_id;
}
- for_each_possible_cpu(i) {
- struct lpfc_eq_intr_info *eqi =
- per_cpu_ptr(phba->sli4_hba.eq_info, i);
-
- INIT_LIST_HEAD(&eqi->list);
- eqi->icnt = 0;
- }
-
- /* This loop sets up all CPUs that are affinitized with a
- * irq vector assigned to the driver. All affinitized CPUs
- * will get a link to that vectors IRQ and EQ.
- *
- * NULL affinity mask handling:
- * If irq count is greater than one, log an error message.
- * If the null mask is received for the first irq, find the
- * first present cpu, and assign the eq index to ensure at
- * least one EQ is assigned.
- */
- for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- /* Get a CPU mask for all CPUs affinitized to this vector */
- maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp) {
- if (phba->cfg_irq_chann > 1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3329 No affinity mask found "
- "for vector %d (%d)\n",
- idx, phba->cfg_irq_chann);
- if (!idx) {
- cpu = cpumask_first(cpu_present_mask);
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- }
- break;
- }
-
- i = 0;
- /* Loop through all CPUs associated with vector idx */
- for_each_cpu_and(cpu, maskp, cpu_present_mask) {
- /* Set the EQ index and IRQ for that vector */
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
-
- /* If this is the first CPU thats assigned to this
- * vector, set LPFC_CPU_FIRST_IRQ.
- */
- if (!i)
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- i++;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d flag x%x\n",
- cpu, cpup->irq, cpup->eq, cpup->flag);
- }
- }
-
/* After looking at each irq vector assigned to this pcidev, its
* possible to see that not ALL CPUs have been accounted for.
* Next we will set any unassigned (unaffinitized) cpu map
@@ -10772,7 +10898,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
new_cpu = cpumask_next(
@@ -10785,7 +10911,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
found_same:
/* We found a matching phys_id, so copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10797,9 +10922,10 @@ found_same:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
- "irq %d from id %d same "
+ "eq %d from peer cpu %d same "
"phys_id (%d)\n",
- cpu, cpup->irq, new_cpu, cpup->phys_id);
+ cpu, cpup->eq, new_cpu,
+ cpup->phys_id);
}
}
@@ -10823,7 +10949,7 @@ found_same:
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
@@ -10833,13 +10959,12 @@ found_same:
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3339 Set Affinity: CPU %d "
- "irq %d UNASSIGNED\n",
- cpup->hdwq, cpup->irq);
+ "eq %d UNASSIGNED\n",
+ cpu, cpup->eq);
continue;
found_any:
/* We found an available entry, copy the IRQ info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10851,8 +10976,8 @@ found_any:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
- "irq %d from id %d (%d/%d)\n",
- cpu, cpup->irq, new_cpu,
+ "eq %d from peer cpu %d (%d/%d)\n",
+ cpu, cpup->eq, new_cpu,
new_cpup->phys_id, new_cpup->core_id);
}
}
@@ -10873,11 +10998,11 @@ found_any:
idx++;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
}
- /* Finally we need to associate a hdwq with each cpu_map entry
+ /* Associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are fewer
* hardware queues than CPUs. For that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
@@ -10951,9 +11076,26 @@ found_any:
logit:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
+ }
+
+ /*
+ * Initialize the cpu_map slots for not-present cpus in case
+ * a cpu is hot-added. Perform a simple hdwq round robin assignment.
+ */
+ idx = 0;
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
+
+ cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3340 Set Affinity: not present "
+ "CPU %d hdwq %d\n",
+ cpu, cpup->hdwq);
}
/* The cpu_map array will be used later during initialization
@@ -10963,11 +11105,280 @@ found_any:
}
/**
+ * lpfc_cpuhp_get_eq - get the event queues serviced by a cpu going offline
+ *
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: cpu going offline
+ * @eqlist: list on which to collect the EQs that must be polled in software
+ */
+static void
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
+ struct list_head *eqlist)
+{
+ const struct cpumask *maskp;
+ struct lpfc_queue *eq;
+ cpumask_t tmp;
+ u16 idx;
+
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
+ if (!maskp)
+ continue;
+ /*
+ * If the irq is not affinitized to the cpu going
+ * offline, we don't need to poll the eq attached
+ * to it.
+ */
+ if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ continue;
+ /* Get the cpus that are online and affinitized to
+ * this irq vector. If the count is more than one,
+ * cpuhp is not going to shut down this vector. Since
+ * this cpu has not gone offline yet, we need > 1.
+ */
+ cpumask_and(&tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(&tmp) > 1)
+ continue;
+
+ /* Now that we have an irq to shutdown, get the eq
+ * mapped to this irq. Note: multiple hdwq's in
+ * the software can share an eq, but eventually
+ * only one eq will be mapped to this vector.
+ */
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ list_add(&eq->_poll_list, eqlist);
+ }
+}
+
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+ /*
+ * unregistering the instance doesn't stop the polling
+ * timer. Wait for the poll timer to retire.
+ */
+ synchronize_rcu();
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ __lpfc_cpuhp_remove(phba);
+}
+
+static void lpfc_cpuhp_add(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ rcu_read_lock();
+
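+ /* If any EQs are still being polled, restart the poll heartbeat timer */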
+ if (!list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ rcu_read_unlock();
+
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+}
+
+static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
+{
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ *retval = -EAGAIN;
+ return true;
+ }
+
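+ /* Only SLI-4 ports poll EQs; nothing to do for other revisions */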
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ *retval = 0;
+ return true;
+ }
+
+ /* proceed with the hotplug */
+ return false;
+}
+
+/**
+ * lpfc_irq_set_aff - set IRQ affinity
+ * @eqhdl: EQ handle
+ * @cpu: cpu to set affinity
+ *
+ **/
+static inline void
+lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ cpumask_set_cpu(cpu, &eqhdl->aff_mask);
+ irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_clear_aff - clear IRQ affinity
+ * @eqhdl: EQ handle
+ *
+ **/
+static inline void
+lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
+ * @phba: pointer to HBA context object.
+ * @cpu: cpu going offline/online
+ * @offline: true, cpu is going offline. false, cpu is coming online.
+ *
+ * If cpu is going offline, we make a best effort to find the next
+ * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ *
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
+ *
+ * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
+ *
+ **/
+static void
+lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct cpumask *aff_mask;
+ unsigned int cpu_select, cpu_next, idx;
+ const struct cpumask *numa_mask;
+
+ if (!phba->cfg_irq_numa)
+ return;
+
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (!cpumask_test_cpu(cpu, numa_mask))
+ return;
+
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ return;
+
+ if (offline) {
+ /* Find next online CPU on NUMA node */
+ cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+
+ /* Found a valid CPU */
+ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
+ /* Go through each eqhdl and ensure offlining
+ * cpu aff_mask is migrated
+ */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ aff_mask = lpfc_get_aff_mask(idx);
+
+ /* Migrate affinity */
+ if (cpumask_test_cpu(cpu, aff_mask))
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
+ cpu_select);
+ }
+ } else {
+ /* Rely on irqbalance if no online CPUs left on NUMA */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++)
+ lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
+ }
+ } else {
+ /* Migrate affinity back to this CPU */
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
+ }
+}
+
+static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ LIST_HEAD(eqlist);
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, true);
+
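+ /* Collect the EQs whose only remaining online affinitized cpu is this one */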
+ lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+
+ /* start polling on these eq's */
+ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
+ list_del_init(&eq->_poll_list);
+ lpfc_sli4_start_polling(eq);
+ }
+
+ return 0;
+}
+
+static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ unsigned int n;
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, false);
+
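+ /* Stop software polling for any EQ that maps back to this cpu */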
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
+ n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
+ if (n == cpu)
+ lpfc_sli4_stop_polling(eq);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec.
+ * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
+ * to cpus on the system.
+ *
+ * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
+ * the number of cpus on the same numa node as this adapter. The vectors are
+ * allocated without requesting OS affinity mapping. A vector will be
+ * allocated and assigned to each online and offline cpu. If the cpu is
+ * online, then affinity will be set to that cpu. If the cpu is offline, then
+ * affinity will be set to the nearest peer cpu within the numa node that is
+ * online. If there are no online cpus within the numa node, affinity is not
+ * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
+ * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
+ * configured.
+ *
+ * If numa mode is not enabled and there is more than 1 vector allocated, then
+ * the driver relies on the managed irq interface, where the OS assigns
+ * vector-to-cpu affinity. The driver then uses that affinity mapping to
+ * set up its cpu mapping table.
*
* Return codes
* 0 - successful
@@ -10978,13 +11389,31 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
+ const struct cpumask *numa_mask = NULL;
+ unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
+ struct lpfc_hba_eq_hdl *eqhdl;
+ const struct cpumask *maskp;
+ bool first;
+ unsigned int flags = PCI_IRQ_MSIX;
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- rc = pci_alloc_irq_vectors(phba->pcidev,
- 1,
- vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+ cpu_cnt = cpumask_weight(numa_mask);
+ vectors = min(phba->cfg_irq_chann, cpu_cnt);
+
+ /* cpu: iterates over numa_mask including offline or online
+ * cpu_select: iterates over online numa_mask to set affinity
+ */
+ cpu = cpumask_first(numa_mask);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else {
+ flags |= PCI_IRQ_AFFINITY;
+ }
+
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -10994,23 +11423,61 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ eqhdl = lpfc_get_eq_hdl(index);
+ name = eqhdl->handler_name;
memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl->idx = index;
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
"request_irq failed (%d)\n", index, rc);
goto cfg_fail_out;
}
+
+ eqhdl->irq = pci_irq_vector(phba->pcidev, index);
+
+ if (phba->cfg_irq_numa) {
+ /* If found a neighboring online cpu, set affinity */
+ if (cpu_select < nr_cpu_ids)
+ lpfc_irq_set_aff(eqhdl, cpu_select);
+
+ /* Assign EQ to cpu_map */
+ lpfc_assign_eq_map_info(phba, index,
+ LPFC_CPU_FIRST_IRQ,
+ cpu);
+
+ /* Iterate to next offline or online cpu in numa_mask */
+ cpu = cpumask_next(cpu, numa_mask);
+
+ /* Find next online cpu in numa_mask to set affinity */
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else if (vectors == 1) {
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
+ cpu);
+ } else {
+ maskp = pci_irq_get_affinity(phba->pcidev, index);
+
+ first = true;
+ /* Loop through all CPUs associated with vector index */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ /* If this is the first CPU that's assigned to
+ * this vector, set LPFC_CPU_FIRST_IRQ.
+ */
+ lpfc_assign_eq_map_info(phba, index,
+ first ?
+ LPFC_CPU_FIRST_IRQ : 0,
+ cpu);
+ if (first)
+ first = false;
+ }
+ }
}
if (vectors != phba->cfg_irq_chann) {
@@ -11020,17 +11487,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->cfg_irq_chann, vectors);
if (phba->cfg_irq_chann > vectors)
phba->cfg_irq_chann = vectors;
- if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
- phba->cfg_nvmet_mrq = vectors;
}
return rc;
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ for (--index; index >= 0; index--) {
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
+ }
/* Unconfigure MSI-X capability structure */
pci_free_irq_vectors(phba->pcidev);
@@ -11057,6 +11525,8 @@ static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
+ unsigned int cpu;
+ struct lpfc_hba_eq_hdl *eqhdl;
rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
@@ -11078,9 +11548,15 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
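+ /* A single MSI vector services all EQs; map it to the first present cpu */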
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
+
for (index = 0; index < phba->cfg_irq_chann; index++) {
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl = lpfc_get_eq_hdl(index);
+ eqhdl->idx = index;
}
return 0;
@@ -11138,15 +11614,21 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
struct lpfc_hba_eq_hdl *eqhdl;
+ unsigned int cpu;
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
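+ /* INTx is one interrupt line for the HBA; map EQ 0 to the first present cpu */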
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
+ cpu);
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl = lpfc_get_eq_hdl(idx);
eqhdl->idx = idx;
- eqhdl->phba = phba;
}
}
}
@@ -11168,14 +11650,14 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX) {
int index;
+ struct lpfc_hba_eq_hdl *eqhdl;
/* Free up MSI-X multi-message vectors */
for (index = 0; index < phba->cfg_irq_chann; index++) {
- irq_set_affinity_hint(
- pci_irq_vector(phba->pcidev, index),
- NULL);
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
}
} else {
free_irq(phba->pcidev->irq, phba);
@@ -11367,6 +11849,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Wait for completion of device XRI exchange busy */
lpfc_sli4_xri_exchange_busy_wait(phba);
+ /* per-phba callback de-registration for hotplug event */
+ lpfc_cpuhp_remove(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
@@ -11538,6 +12023,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
+ sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -11589,13 +12075,10 @@ fcponly:
}
/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
+ * accommodate 512K and 1M IOs in a single nvme buf.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
@@ -12312,35 +12795,57 @@ lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
}
-static void
+static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
- if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
+ int rc;
+
+ /* Three cases: (1) FW was not supported on the detected adapter.
+ * (2) FW update has been locked out administratively.
+ * (3) Some other error during FW update.
+ * In each case, an unmaskable message is written to the console
+ * for admin diagnosis.
+ */
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- magic_number != MAGIC_NUMER_G6) ||
+ magic_number != MAGIC_NUMBER_G6) ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMER_G7))
+ magic_number != MAGIC_NUMBER_G7)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3030 This firmware version is not supported on "
- "this HBA model. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
- else
+ "3030 This firmware version is not supported on"
+ " this HBA model. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EINVAL;
+ } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3022 FW Download failed. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
+ "3021 Firmware downloads have been prohibited "
+ "by a system configuration setting on "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EACCES;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 FW Download failed. Add Status x%x "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ offset, phba->pcidev->device, magic_number,
+ ftype, fid, fsize, fw->size);
+ rc = -EIO;
+ }
+ return rc;
}
-
/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
- * @phba: pointer to lpfc hba data structure.
+ * @context: opaque pointer to the lpfc hba data structure.
*
**/
static void
@@ -12409,8 +12914,12 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
if (rc) {
- lpfc_log_write_firmware_error(phba, offset,
- magic_number, ftype, fid, fsize, fw);
+ rc = lpfc_log_write_firmware_error(phba, offset,
+ magic_number,
+ ftype,
+ fid,
+ fsize,
+ fw);
goto release_out;
}
}
@@ -12430,9 +12939,12 @@ release_out:
}
release_firmware(fw);
out:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.\n", rc);
- return;
+ if (rc < 0)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3062 Firmware update error, status %d.\n", rc);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update success: size %d.\n", rc);
}
/**
@@ -12551,6 +13063,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->pport = NULL;
lpfc_stop_port(phba);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
+
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -12632,6 +13150,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
+ INIT_LIST_HEAD(&phba->poll_list);
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
+
return 0;
out_free_sysfs_attr:
@@ -13344,8 +13865,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
phba->cfg_fof = 1;
} else {
phba->cfg_fof = 0;
- if (phba->device_data_mem_pool)
- mempool_destroy(phba->device_data_mem_pool);
+ mempool_destroy(phba->device_data_mem_pool);
phba->device_data_mem_pool = NULL;
}
@@ -13450,11 +13970,24 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
+ error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "lpfc/sli4:online",
+ lpfc_cpu_online, lpfc_cpu_offline);
+ if (error < 0)
+ goto cpuhp_failure;
+ lpfc_cpuhp_state = error;
+
error = pci_register_driver(&lpfc_driver);
- if (error) {
- fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
- }
+ if (error)
+ goto unwind;
+
+ return error;
+
+unwind:
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
+cpuhp_failure:
+ fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
return error;
}
@@ -13471,6 +14004,7 @@ lpfc_exit(void)
{
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
idr_destroy(&lpfc_hba_index);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index ea10f03437f5..148d02a27b58 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -46,6 +46,23 @@
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
+/* generate message by verbose log setting or severity */
+#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
+do { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } while (0)
+
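+/* As above, but keyed to the hba: uses the physical port's verbose mask when
+ * the port exists, and always prints at severity warning ('4') or higher.
+ */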
+#define lpfc_log_msg(phba, level, mask, fmt, arg...) \
+do { \
+ { uint32_t log_verbose = (phba)->pport ? \
+ (phba)->pport->cfg_log_verbose : \
+ (phba)->cfg_log_verbose; \
+ if (((mask) & log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+ fmt, phba->brd_no, ##arg); \
+ } \
+} while (0)
+
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8abe933bad09..d1773c01d2b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba,
if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ !(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ae09bb863497..7082279e4c01 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -230,9 +230,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
- dma_pool_destroy(phba->txrdy_payload_pool);
- phba->txrdy_payload_pool = NULL;
-
dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index fc6e4546d738..ae4359013846 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -279,6 +279,55 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
+/**
+ * lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+ * @phba: pointer to lpfc hba data structure.
+ * @link_mbox: pointer to CONFIG_LINK mailbox object
+ *
+ * This routine is only called if we are SLI3, direct connect pt2pt
+ * mode and the remote NPort issues the PLOGI after link up.
+ */
+static void
+lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+{
+ LPFC_MBOXQ_t *login_mbox;
+ MAILBOX_t *mb = &link_mbox->u.mb;
+ struct lpfc_iocbq *save_iocb;
+ struct lpfc_nodelist *ndlp;
+ int rc;
+
+ ndlp = link_mbox->ctx_ndlp;
+ login_mbox = link_mbox->context3;
+ save_iocb = login_mbox->context3;
+ link_mbox->context3 = NULL;
+ login_mbox->context3 = NULL;
+
+ /* Check for CONFIG_LINK error */
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+ mb->mbxStatus);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ return;
+ }
+
+ /* Now that CONFIG_LINK completed, and our SID is configured,
+ * we can proceed with sending the PLOGI ACC.
+ */
+ rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, login_mbox);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4576 PLOGI ACC fails pt2pt discovery: %x\n",
+ rc);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ }
+
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+}
+
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -291,10 +340,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
- LPFC_MBOXQ_t *mbox;
+ LPFC_MBOXQ_t *link_mbox;
+ LPFC_MBOXQ_t *login_mbox;
+ struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
- int rc;
+ int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -343,6 +394,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
ndlp->nlp_fcp_info |= CLASS3;
+ defer_acc = 0;
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -354,7 +406,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
-
/* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
@@ -396,6 +447,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ login_mbox = NULL;
+ link_mbox = NULL;
+ save_iocb = NULL;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -423,17 +478,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
else {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
+ defer_acc = 1;
+ link_mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!link_mbox)
goto out;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_config_link(phba, link_mbox);
+ link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+
+ save_iocb = lpfc_sli_get_iocbq(phba);
+ if (!save_iocb)
goto out;
- }
+ /* Save info from cmd IOCB used in rsp */
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
}
lpfc_can_disctmo(vport);
@@ -448,8 +508,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
}
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!login_mbox)
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
@@ -457,21 +517,19 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
- (uint8_t *) sp, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+ if (rc)
goto out;
- }
/* ACC PLOGI rsp command needs to execute first,
- * queue this mbox command to be processed later.
+ * queue this login_mbox command to be processed later.
*/
- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
/*
- * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+ * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
- mbox->vport = vport;
+ login_mbox->vport = vport;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
spin_unlock_irq(shost->host_lock);
@@ -484,8 +542,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -504,16 +564,47 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, mbox);
+ ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ if (defer_acc) {
+ /* So the order here should be:
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * Issue PLOGI ACC
+ * PLOGI ACC cmpl
+ * Issue REG_LOGIN mbox
+ */
+
+ /* Save the REG_LOGIN mbox and the rcv IOCB copy for later use */
+ link_mbox->context3 = login_mbox;
+ login_mbox->context3 = save_iocb;
+
+ /* Start the ball rolling by issuing CONFIG_LINK here */
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
return 1;
}
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
out:
+ if (defer_acc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4577 pt2pt discovery failure: %p %p %p\n",
+ save_iocb, link_mbox, login_mbox);
+ if (save_iocb)
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ if (link_mbox)
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ if (login_mbox)
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
@@ -2030,7 +2121,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
- if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+ if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
+ bf_get_be32(prli_conf, nvpr))
ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
else
ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a227e36cbdc2..db4a04a207ec 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -196,6 +196,46 @@ lpfc_nvme_cmd_template(void)
}
/**
+ * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniquely identifies the local exchange resource.
+ * @opt: Option bits -
+ * bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+void
+lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (opt & INHIBIT_ABORT)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ /* Abort specified xri tag, with the mask deliberately zeroed */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* Abort the IO associated with this outstanding exchange ID. */
+ wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+ /* iotag for the wqe completion. */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
+
+/**
* lpfc_nvme_create_queue -
* @lpfc_pnvme: Pointer to the driver's nvme instance data
* @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
@@ -1791,7 +1831,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
- union lpfc_wqe128 *abts_wqe;
unsigned long flags;
int ret_val;
@@ -1912,37 +1951,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Ready - mark outstanding as aborted by driver. */
nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* Complete prepping the abort wqe and issue to the FW. */
- abts_wqe = &abts_buf->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
- nvmereq_wqe->iocb.ulpClass);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_buf->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_buf->iocb_flag |= LPFC_IO_NVME;
@@ -2084,7 +2093,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
qp = lpfc_ncmd->hdwq;
- if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+ if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
"ox_id x%x on reqtag x%x\n",
@@ -2139,12 +2148,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- /* Advertise how many hw queues we support based on fcp_io_sched */
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
- else
- lpfc_nvme_template.max_hw_queues =
- phba->sli4_hba.num_present_cpu;
+ /* Advertise how many hw queues we support based on cfg_hdw_queue,
+ * which will not exceed cpu count.
+ */
+ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 9884228800a5..9dc9afe1c255 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -378,13 +378,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
int cpu;
unsigned long iflag;
- if (ctxp->txrdy) {
- dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
- ctxp->txrdy_phys);
- ctxp->txrdy = NULL;
- ctxp->txrdy_phys = 0;
- }
-
if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6411 NVMET free, already free IO x%x: %d %d\n",
@@ -430,7 +423,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -1958,12 +1950,10 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- if (!phba->targetport) {
+ if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO x%x\n", oxid);
+ "6154 LS Drop IO\n");
oxid = 0;
size = 0;
sid = 0;
@@ -1971,6 +1961,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
}
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
@@ -2326,7 +2319,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -2401,6 +2393,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
d_buf = piocb->context2;
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3015 LS Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
@@ -2429,6 +2426,11 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint64_t isr_timestamp,
uint8_t cqflag)
{
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3167 NVMET FCP Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
@@ -2595,7 +2597,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
struct scatterlist *sgel;
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
- uint32_t *txrdy;
dma_addr_t physaddr;
int i, cnt;
int do_pbde;
@@ -2757,23 +2758,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
&lpfc_treceive_cmd_template.words[3],
sizeof(uint32_t) * 9);
- /* Words 0 - 2 : The first sg segment */
- txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
- GFP_KERNEL, &physaddr);
- if (!txrdy) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6041 Bad txrdy buffer: oxid x%x\n",
- ctxp->oxid);
- return NULL;
- }
- ctxp->txrdy = txrdy;
- ctxp->txrdy_phys = physaddr;
- wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
- wqe->fcp_treceive.bde.addrLow =
- cpu_to_le32(putPaddrLow(physaddr));
- wqe->fcp_treceive.bde.addrHigh =
- cpu_to_le32(putPaddrHigh(physaddr));
+ /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
+ wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
+ wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
+ wqe->fcp_treceive.bde.addrLow = 0;
+ wqe->fcp_treceive.bde.addrHigh = 0;
/* Word 4 */
wqe->fcp_treceive.relative_offset = ctxp->offset;
@@ -2808,17 +2797,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
- /* Setup 1 TXRDY and 1 SKIP SGE */
- txrdy[0] = 0;
- txrdy[1] = cpu_to_be32(rsp->transfer_length);
- txrdy[2] = 0;
-
- sgl->addr_hi = putPaddrHigh(physaddr);
- sgl->addr_lo = putPaddrLow(physaddr);
+ /* Setup 2 SKIP SGEs */
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
sgl->word2 = 0;
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+ sgl->sge_len = 0;
sgl++;
sgl->addr_hi = 0;
sgl->addr_lo = 0;
@@ -3239,9 +3224,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
- union lpfc_wqe128 *abts_wqe;
struct lpfc_nodelist *ndlp;
unsigned long flags;
+ u8 opt;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3280,8 +3265,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
- abts_wqe = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;
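+ /* If an ABTS was already received for this exchange, don't send another on the wire */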
+ opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3327,40 +3312,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
-
- /* word 3 */
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_wqeq->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
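
Roughly twenty-five lines of open-coded abort WQE initialization collapse into
one lpfc_nvme_prep_abort_wqe() call, with the only per-caller difference (the
inhibit-ABTS option derived from LPFC_NVMET_ABTS_RCV) passed in as opt. A toy
sketch of that factoring; the struct and field names are illustrative, not the
real union lpfc_wqe128 layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct abort_wqe {
	uint32_t abort_tag;	/* exchange to abort */
	uint32_t reqtag;	/* iotag for the abort's own completion */
	uint8_t ia;		/* inhibit sending ABTS on the wire */
};

/* One shared prep helper replaces the open-coded copies; 'opt'
 * carries the only per-caller difference. */
static void prep_abort_wqe(struct abort_wqe *w, uint32_t xritag,
			   uint32_t iotag, uint8_t opt)
{
	memset(w, 0, sizeof(*w));	/* WQEs are reused: clear stale data */
	w->abort_tag = xritag;
	w->reqtag = iotag;
	w->ia = opt ? 1 : 0;
}

int main(void)
{
	struct abort_wqe w;

	prep_abort_wqe(&w, 0xbeef, 77, 1);
	printf("tag=%#x reqtag=%u ia=%u\n",
	       (unsigned)w.abort_tag, (unsigned)w.reqtag, w.ia);
	return 0;
}
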
@@ -3495,7 +3452,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 8ff67deac10a..b80b1639b9a7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -112,9 +112,7 @@ struct lpfc_nvmet_rcv_ctx {
struct lpfc_hba *phba;
struct lpfc_iocbq *wqeq;
struct lpfc_iocbq *abort_wqeq;
- dma_addr_t txrdy_phys;
spinlock_t ctxlock; /* protect flag access */
- uint32_t *txrdy;
uint32_t sid;
uint32_t offset;
uint16_t oxid;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6822cd9ff8f1..b138d9fee675 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -134,21 +134,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
/**
* lpfc_update_stats - Update statistical data for the command completion
- * @phba: Pointer to HBA object.
+ * @vport: The virtual port on which this call is executing.
* @lpfc_cmd: lpfc scsi command object pointer.
*
* This function is called when there is a command completion and this
* function updates the statistical data for the command completion.
**/
static void
-lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
- struct Scsi_Host *shost = cmd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long latency;
int i;
@@ -526,7 +526,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
&qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
@@ -566,7 +566,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
if (iocbq->sli4_xritag != xri)
continue;
psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
@@ -786,7 +786,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
psb->prot_seg_cnt = 0;
qp = psb->hdwq;
- if (psb->exch_busy) {
+ if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
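
These lpfc_scsi.c hunks retire the dedicated exch_busy word in favor of one
bit, LPFC_SBUF_XBUSY, inside the buffer's existing flags field, so the
exchange-busy state packs with the other per-buffer state bits. The set, clear
and test idiom in a minimal sketch:

#include <stdint.h>
#include <stdio.h>

#define SBUF_XBUSY	 0x1	/* exchange still busy in hardware */
#define SBUF_BUMP_QDEPTH 0x2	/* another bit sharing the field */

struct io_buf { uint16_t flags; };

int main(void)
{
	struct io_buf b = { .flags = SBUF_BUMP_QDEPTH };
	int hw_xb = 1;		/* stand-in for the WCQE XB bit */

	/* mirror one bit of hardware state, leaving the others intact */
	if (hw_xb)
		b.flags |= SBUF_XBUSY;
	else
		b.flags &= ~SBUF_XBUSY;

	printf("xbusy=%d qdepth=%d\n", !!(b.flags & SBUF_XBUSY),
	       !!(b.flags & SBUF_BUMP_QDEPTH));
	return 0;
}
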
@@ -3812,7 +3812,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
- if (!cmd) {
+ if (!cmd || !phba) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"2621 IO completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
@@ -3824,7 +3824,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
@@ -3835,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
- lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -3869,7 +3872,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
#endif
- if (lpfc_cmd->status) {
+ if (unlikely(lpfc_cmd->status)) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
@@ -4002,7 +4005,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(phba, lpfc_cmd);
+ lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4610,17 +4613,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
- if (err == 2) {
- cmnd->result = DID_ERROR << 16;
- goto out_fail_command_release_buf;
- } else if (err) {
+ if (unlikely(err)) {
+ if (err == 2) {
+ cmnd->result = DID_ERROR << 16;
+ goto out_fail_command_release_buf;
+ }
goto out_host_busy_free_buf;
}
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
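
Several error checks in the completion and queuecommand paths gain unlikely(),
a hint that the branch is cold so the common case compiles to straight-line
code; the err == 2 case is also folded inside the single unlikely(err) test. A
short sketch of the macro as GCC and Clang implement it:

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

/* Error handling mirrors the queuecommand hunk: 2 means fail the
 * command outright, any other nonzero value means host-busy. */
static int submit(int err)
{
	if (unlikely(err)) {
		if (err == 2)
			return -1;	/* fail command */
		return -2;		/* host busy, retry later */
	}
	return 0;			/* hot path */
}

int main(void)
{
	printf("%d %d %d\n", submit(0), submit(1), submit(2));
	return 0;
}
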
@@ -4843,20 +4847,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);
}
- /* no longer need the lock after this point */
- spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
/* Indicate the IO is not being aborted by the driver. */
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
+ /* no longer need the lock after this point */
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
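
The abort-handler hunk delays the hbalock release until after the IOCB_ERROR
check, so the failure is unwound under the same lock that protected the
submission instead of letting a completion race in between. A loose userspace
analogue with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static int ring_busy;

static int issue_abort(void)
{
	int rc;

	pthread_mutex_lock(&hba_lock);
	rc = ring_busy ? -1 : 0;	/* stand-in for the iocb submit */
	if (rc) {
		/* unwind under the same lock that covered the submit */
		pthread_mutex_unlock(&hba_lock);
		return rc;
	}
	pthread_mutex_unlock(&hba_lock);
	return 0;
}

int main(void)
{
	printf("idle ring: %d\n", issue_abort());
	ring_busy = 1;
	printf("busy ring: %d\n", issue_abort());
	return 0;
}
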
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 614f78dddafe..c82b5792da98 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -87,6 +87,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
+static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -467,25 +471,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
}
static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
- struct lpfc_eqe *eqe;
- uint32_t count = 0;
+ struct lpfc_eqe *eqe = NULL;
+ u32 eq_count = 0, cq_count = 0;
+ struct lpfc_cqe *cqe = NULL;
+ struct lpfc_queue *cq = NULL, *childq = NULL;
+ int cqid = 0;
/* walk all the EQ entries and drop on the floor */
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+ cq = NULL;
+
+ list_for_each_entry(childq, &eq->child_list, list) {
+ if (childq->queue_id == cqid) {
+ cq = childq;
+ break;
+ }
+ }
+ /* If CQ is valid, iterate through it and drop all the CQEs */
+ if (cq) {
+ cqe = lpfc_sli4_cq_get(cq);
+ while (cqe) {
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
+ cq_count++;
+ cqe = lpfc_sli4_cq_get(cq);
+ }
+ /* Clear and re-arm the CQ */
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
+ LPFC_QUEUE_REARM);
+ cq_count = 0;
+ }
__lpfc_sli4_consume_eqe(phba, eq, eqe);
- count++;
+ eq_count++;
eqe = lpfc_sli4_eq_get(eq);
}
/* Clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
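
The flush routine grows from EQ-only to EQ-plus-children: each EQE's cqid is
resolved to its child CQ, the CQ is drained and its doorbell written with the
consumed count, and only then is the EQE itself consumed. A toy sketch of the
nested drain, with the cqid lookup elided and counters standing in for the
doorbell writes:

#include <stdio.h>

struct queue { int count; /* entries still pending */ };

static void eqcq_flush(struct queue *eq, struct queue *cq)
{
	int eq_count = 0, cq_count = 0;

	while (eq->count) {
		/* drain the child completion queue first */
		while (cq->count) {
			cq->count--;
			cq_count++;
		}
		/* then consume the event queue entry itself */
		eq->count--;
		eq_count++;
	}
	printf("flushed %d EQEs, %d CQEs\n", eq_count, cq_count);
}

int main(void)
{
	struct queue eq = { .count = 3 }, cq = { .count = 5 };

	eqcq_flush(&eq, &cq);
	return 0;
}
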
static int
-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ uint8_t rearm)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -519,8 +550,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
eq->queue_claimed = 0;
rearm_and_exit:
- /* Always clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+ /* Always clear the EQ. */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
return count;
}
@@ -2526,6 +2557,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
+ if (vport->load_flag & FC_UNLOADING)
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
@@ -2672,7 +2705,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x/x%x) Cmpl\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2693,7 +2727,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"(x%x/x%x) x%x x%x x%x\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2701,7 +2736,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmb),
pmbox->mbxStatus,
pmbox->un.varWords[0],
- pmb->vport->port_state);
+ pmb->vport ? pmb->vport->port_state :
+ LPFC_VPORT_UNKNOWN);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -6167,6 +6203,14 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
+ case LPFC_SET_DUAL_DUMP:
+ bf_set(lpfc_mbx_set_feature_dd,
+ &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
+ bf_set(lpfc_mbx_set_feature_ddquery,
+ &mbox->u.mqe.un.set_feature, 0);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ break;
}
return;
@@ -6184,11 +6228,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
/* Disable FW logging to host memory */
writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Wait 10ms for firmware to stop using DMA buffer */
+ usleep_range(10 * 1000, 20 * 1000);
}
/**
@@ -6224,7 +6273,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
ras_fwlog->lwpd.virt = NULL;
}
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
}
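
The ras_active boolean becomes a three-way state machine (INACTIVE,
REG_INPROGRESS, ACTIVE), with REG_INPROGRESS set before the mailbox is issued,
and every transition now happens under hbalock so a reader cannot observe a
registration half in flight. A userspace sketch of lock-guarded state
transitions, with a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>

enum fwlog_state { INACTIVE, ACTIVE, REG_INPROGRESS };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static enum fwlog_state state = INACTIVE;

/* Every transition takes the lock, so concurrent readers holding the
 * same lock never observe a half-updated or stale state. */
static void set_state(enum fwlog_state s)
{
	pthread_mutex_lock(&state_lock);
	state = s;
	pthread_mutex_unlock(&state_lock);
}

static enum fwlog_state get_state(void)
{
	enum fwlog_state s;

	pthread_mutex_lock(&state_lock);
	s = state;
	pthread_mutex_unlock(&state_lock);
	return s;
}

int main(void)
{
	set_state(REG_INPROGRESS);	/* mailbox issued */
	set_state(ACTIVE);		/* completion handler */
	printf("state=%d\n", get_state());
	return 0;
}
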
/**
@@ -6326,7 +6377,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto disable_ras;
}
- ras_fwlog->ras_active = true;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
mempool_free(pmb, phba->mbox_mem_pool);
return;
@@ -6358,6 +6411,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
@@ -6417,6 +6474,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = REG_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
@@ -7148,7 +7208,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i, cnt, len;
+ int rc, i, cnt, len, dd;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -7399,6 +7459,23 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /* Always try to enable dual dump feature if we can */
+ lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
+ if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
+ "6448 Dual Dump is enabled\n");
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
+ "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x dd:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(
+ phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(
+ phba, mboxq),
+ rc, dd);
/*
* Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
* calls depends on these resources to complete port setup.
@@ -7523,9 +7600,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.nvmet_xri_cnt = rc;
- cnt = phba->cfg_iocb_cnt * 1024;
- /* We need 1 iocbq for every SGL, for IO processing */
- cnt += phba->sli4_hba.nvmet_xri_cnt;
+ /* We allocate an iocbq for every receive context SGL.
+ * The additional allocation is for abort and ls handling.
+ */
+ cnt = phba->sli4_hba.nvmet_xri_cnt +
+ phba->sli4_hba.max_cfg_param.max_xri;
} else {
/* update host common xri-sgl sizes and mappings */
rc = lpfc_sli4_io_sgl_update(phba);
@@ -7547,14 +7626,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_destroy_queue;
}
- cnt = phba->cfg_iocb_cnt * 1024;
+ /* Each lpfc_io_buf job structure has an iocbq element.
+ * This cnt provides for abort, els, ct and ls requests.
+ */
+ cnt = phba->sli4_hba.max_cfg_param.max_xri;
}
if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
+ "2821 initialize iocb list with %d entries\n",
+ cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7892,7 +7974,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -8964,7 +9046,8 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to address of newly added command iocb.
*
- * This function is called with hbalock held to add a command
+ * This function is called with hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports to add a command
* iocb to the txq when SLI layer cannot submit the command iocb
* to the ring.
**/
@@ -8972,7 +9055,10 @@ void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- lockdep_assert_held(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lockdep_assert_held(&pring->ring_lock);
+ else
+ lockdep_assert_held(&phba->hbalock);
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
}
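
The lockdep annotation now names the lock that actually protects the txq for
the hardware generation in use: the per-ring lock on SLI-4, hbalock on
SLI-2/3. A toy sketch of dispatching the assertion on a revision field, with
plain assert() standing in for lockdep_assert_held():

#include <assert.h>
#include <stdio.h>

enum { SLI_REV3 = 3, SLI_REV4 = 4 };

static int hbalock_held, ring_lock_held;

static void ringtx_put(int sli_rev)
{
	/* assert the lock that guards the txq on this hardware */
	if (sli_rev == SLI_REV4)
		assert(ring_lock_held);
	else
		assert(hbalock_held);
	/* ... append the iocb to the txq tail ... */
}

int main(void)
{
	ring_lock_held = 1;
	ringtx_put(SLI_REV4);
	puts("ok");
	return 0;
}
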
@@ -9863,7 +9949,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
* an iocb command to an HBA with SLI-4 interface spec.
*
- * This function is called with hbalock held. The function will return success
+ * This function is called with ringlock held. The function will return success
* after it successfully submit the iocb to firmware or after adding to the
* txq.
**/
@@ -10053,10 +10139,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sli_ring *pring;
+ struct lpfc_queue *eq;
unsigned long iflags;
int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
+ eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
+
pring = lpfc_sli4_calc_ring(phba, piocb);
if (unlikely(pring == NULL))
return IOCB_ERROR;
@@ -10064,6 +10153,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10678,14 +10769,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
set_bit(LPFC_DATA_READY, &phba->data_flags);
}
prev_pring_flag = pring->flag;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txq, list) {
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
}
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txcmplq, list) {
if (iocb->vport != vport)
@@ -11050,9 +11141,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4]);
spin_unlock_irq(&phba->hbalock);
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
- lpfc_sli_release_iocbq(phba, abort_iocb);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11736,7 +11824,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
- lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
}
pdone_q = cmdiocbq->context_un.wait_queue;
@@ -13158,13 +13249,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -13217,7 +13314,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_sli_ring *pring = cq->pring;
int txq_cnt = 0;
int txcmplq_cnt = 0;
- int fcp_txcmplq_cnt = 0;
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
@@ -13239,9 +13335,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
- "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ "els_txcmplq_cnt=%d\n",
txq_cnt, phba->iocb_cnt,
- fcp_txcmplq_cnt,
txcmplq_cnt);
return false;
}
@@ -13592,6 +13687,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
LPFC_QUEUE_NOARM);
consumed = 0;
+ cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
}
if (count == LPFC_NVMET_CQ_NOTIFY)
@@ -14220,7 +14316,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
- lpfc_sli4_eq_flush(phba, fpeq);
+ lpfc_sli4_eqcq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
@@ -14230,14 +14326,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
fpeq->last_cpu = raw_smp_processor_id();
if (icnt > LPFC_EQD_ISR_TRIGGER &&
- phba->cfg_irq_chann == 1 &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
/* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq);
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
@@ -14297,6 +14393,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+ struct lpfc_queue *eq;
+ int i = 0;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
+ i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ if (!list_empty(&phba->poll_list))
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+
+ rcu_read_unlock();
+}
+
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
+{
+ struct lpfc_hba *phba = eq->phba;
+ int i = 0;
+
+	/*
+	 * Unlocking an irq is one of the entry points to check
+	 * for re-scheduling, but the io submission path is safe
+	 * because the midlayer does a get_cpu to glue us in.
+	 * Flush out the invalidation queue so we can see the
+	 * updated value of the mode flag.
+	 */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+		/* We will most likely not see the caller's completion
+		 * during this iteration, but that's fine: future io's
+		 * coming in on this eq will pick it up. Lone io's are
+		 * handled through a poll scheduled from the polling
+		 * timer function, which currently fires every 1 msec.
+		 */
+ i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+
+ return i;
+}
+
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ if (list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ /* kickstart slowpath processing for this eq */
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ list_add_rcu(&eq->_poll_list, &phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+	/* Disable slowpath processing for this eq. Kick-start
+	 * the eq by re-arming it ASAP.
+	 */
+ list_del_rcu(&eq->_poll_list);
+ synchronize_rcu();
+
+ if (list_empty(&phba->poll_list))
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
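
The two helpers above tie the slow-path timer's lifetime to list occupancy:
adding the first EQ arms the timer, removing the last one cancels it, so the
timer never runs against an empty list. A minimal sketch with a singly linked
list and a flag standing in for mod_timer()/del_timer_sync():

#include <stdio.h>

struct eq { struct eq *next; };

static struct eq *poll_list;
static int timer_armed;

static void add_to_poll_list(struct eq *e)
{
	if (!poll_list) {
		timer_armed = 1;	/* mod_timer() analogue */
		puts("slowpath timer armed");
	}
	e->next = poll_list;
	poll_list = e;
}

static void remove_from_poll_list(struct eq *e)
{
	struct eq **pp;

	for (pp = &poll_list; *pp; pp = &(*pp)->next)
		if (*pp == e) {
			*pp = e->next;
			break;
		}
	if (!poll_list) {
		timer_armed = 0;	/* del_timer_sync() analogue */
		puts("slowpath timer cancelled");
	}
}

int main(void)
{
	struct eq a, b;

	add_to_poll_list(&a);
	add_to_poll_list(&b);
	remove_from_poll_list(&b);
	remove_from_poll_list(&a);
	return 0;
}
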
+
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *eq, *next;
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
+ list_del(&eq->_poll_list);
+
+ INIT_LIST_HEAD(&phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
+{
+ if (mode == eq->mode)
+ return;
+	/*
+	 * Currently this function is only called during a hotplug
+	 * event, and the cpu on which it is executing is going
+	 * offline. By now the hotplug code has instructed the
+	 * scheduler to remove this cpu from the cpu active mask,
+	 * so we need not worry about being set aside by the
+	 * scheduler for a high-priority process. Interrupts may
+	 * still arrive, but they are known to retire ASAP.
+	 */
+
+ /* Disable polling in the fastpath */
+ WRITE_ONCE(eq->mode, mode);
+ /* flush out the store buffer */
+ smp_wmb();
+
+	/*
+	 * Add this eq to the polling list and start polling. For
+	 * a grace period both the interrupt handler and the poller
+	 * will try to process the eq _but_ that's fine. We have a
+	 * synchronization mechanism in place (queue_claimed) to
+	 * deal with it. This is just a draining phase for the
+	 * interrupt handler (not the eq's), as the barrier has
+	 * guaranteed that all the CPUs have seen the new polled
+	 * state, which effectively disables the REARMING of the
+	 * EQ. The whole idea is that the eq's die off eventually,
+	 * since we are not rearming them anymore.
+	 */
+ mode ? lpfc_sli4_add_to_poll_list(eq) :
+ lpfc_sli4_remove_from_poll_list(eq);
+}
+
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
+{
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
+}
+
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
+
+	/* Kick-start the pending io's in h/w.
+	 * Once we switch an eq back to interrupt processing,
+	 * the io completion path only rearms the eq when it
+	 * receives a completion. But since the eq is in the
+	 * disarmed state it never receives one, which creates
+	 * a deadlock scenario.
+	 */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
+}
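
__lpfc_sli4_switch_eqmode() publishes the new mode with WRITE_ONCE() followed
by smp_wmb(), paired with the smp_rmb()/READ_ONCE() in lpfc_sli4_poll_eq(): a
publish/consume pairing that makes the mode flip visible everywhere before the
rearming behavior changes. A userspace analogue, sketched here with C11
release/acquire atomics rather than the kernel's explicit barriers:

#include <stdatomic.h>
#include <stdio.h>

enum { EQ_INTERRUPT = 0, EQ_POLL = 1 };

static atomic_int mode = EQ_INTERRUPT;

/* Publisher: make the new mode visible before anyone acts on it. */
static void switch_eqmode(int m)
{
	atomic_store_explicit(&mode, m, memory_order_release);
}

/* Fast path: one cheap load decides whether this eq is polled. */
static int need_poll(void)
{
	return atomic_load_explicit(&mode, memory_order_acquire) == EQ_POLL;
}

int main(void)
{
	switch_eqmode(EQ_POLL);
	printf("poll=%d\n", need_poll());
	return 0;
}
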
+
/**
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
@@ -14371,6 +14608,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
return NULL;
INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->_poll_list);
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->wqfull_list);
INIT_LIST_HEAD(&queue->page_list);
@@ -18124,8 +18362,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0001 rpi:%x max:%x lim:%x\n",
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
(int) rpi, max_rpi, rpi_limit);
/*
@@ -18181,11 +18420,19 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
} else {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2016 rpi %x not inuse\n",
rpi);
}
@@ -19683,6 +19930,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19703,6 +19952,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19731,6 +19982,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
return WQE_ERROR;
@@ -20093,6 +20346,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+
if (phba->cfg_xri_rebalancing) {
if (lpfc_ncmd->expedite) {
/* Return to expedite pool */
@@ -20157,13 +20417,6 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
iflag);
}
-
- if (phba->cfg_xpsgl && !phba->nvmet_support &&
- !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
- lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
-
- if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
- lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
}
/**
@@ -20399,8 +20652,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *allocated_sgl = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the sgl_list */
@@ -20412,9 +20666,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8353 error kmalloc memory for HDWQ "
@@ -20434,7 +20688,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
return NULL;
}
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
}
@@ -20442,7 +20696,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_sgl;
}
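
The remaining lpfc_sli.c hunks convert every hdwq_lock user from
spin_lock_irq() to spin_lock_irqsave(), which records the caller's interrupt
state and restores exactly that state on unlock, so the functions are safe
whether or not interrupts were already disabled. A loose userspace analogue
using the signal mask, which sigprocmask() saves and restores the same way:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t all, saved;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &saved);	/* "irqsave" */
	puts("critical section");
	sigprocmask(SIG_SETMASK, &saved, NULL);	/* "irqrestore" */
	return 0;
}
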
@@ -20466,8 +20720,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20480,7 +20735,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20501,8 +20756,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->sgl_list;
struct sli4_hybrid_sgl *list_entry = NULL;
struct sli4_hybrid_sgl *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free sgl pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20514,7 +20770,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
/**
@@ -20538,8 +20794,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *allocated_buf = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the list */
@@ -20552,9 +20809,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8355 error kmalloc memory for HDWQ "
@@ -20579,7 +20836,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
sizeof(struct fcp_cmnd));
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
}
@@ -20587,7 +20844,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_buf;
}
@@ -20612,8 +20869,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20626,7 +20884,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20647,8 +20905,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
struct fcp_cmd_rsp_buf *list_entry = NULL;
struct fcp_cmd_rsp_buf *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free cmd_rsp buf pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20661,5 +20920,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 37fbcb46387e..7bcf922a8be2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -384,14 +384,13 @@ struct lpfc_io_buf {
struct lpfc_nodelist *ndlp;
uint32_t timeout;
- uint16_t flags; /* TBD convert exch_busy to flags */
+ uint16_t flags;
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 0d4882a9e634..d963ca871383 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -41,8 +41,13 @@
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_HDWQ_MIN 0
-#define LPFC_HBA_HDWQ_MAX 128
-#define LPFC_HBA_HDWQ_DEF 0
+#define LPFC_HBA_HDWQ_MAX 256
+#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN
+
+/* irq_chann range, values */
+#define LPFC_IRQ_CHANN_MIN 0
+#define LPFC_IRQ_CHANN_MAX 256
+#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN
/* FCP MQ queue count limiting */
#define LPFC_FCP_MQ_THRESHOLD_MIN 0
@@ -133,6 +138,23 @@ struct lpfc_rqb {
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
+
+	/*
+	 * If interrupts are in effect on _all_ the eq's, the footprint
+	 * of the polling code is zero (except for mode). This memory is
+	 * checked for every io to see if the io needs to be polled, and
+	 * on completion to check if the eq's need to be rearmed. Keep it
+	 * in the same cacheline as the queue ptr to avoid cpu fetch
+	 * stalls. Using 1B of memory would leave a 7B hole, so fill it
+	 * with other frequently used members.
+	 */
+ uint16_t last_cpu; /* most recent cpu */
+ uint16_t hdwq;
+ uint8_t qe_valid;
+ uint8_t mode; /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT 0
+#define LPFC_EQ_POLL 1
+
struct list_head wqfull_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
@@ -199,6 +221,7 @@ struct lpfc_queue {
uint8_t q_flag;
#define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
#define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */
#define LPFC_NVMET_CQ_NOTIFY 4
void __iomem *db_regaddr;
uint16_t dpp_enable;
@@ -239,10 +262,8 @@ struct lpfc_queue {
struct delayed_work sched_spwork;
uint64_t isr_timestamp;
- uint16_t hdwq;
- uint16_t last_cpu; /* most recent cpu */
- uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
+ struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
};
@@ -451,11 +472,17 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
+ uint16_t irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
+ struct cpumask aff_mask;
};
+#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
+#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
+#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
+
/*BB Credit recovery value*/
struct lpfc_bbscn_params {
uint32_t word0;
@@ -513,6 +540,7 @@ struct lpfc_pc_sli4_params {
uint8_t cqav;
uint8_t wqsize;
uint8_t bv1s;
+ uint8_t pls;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -544,11 +572,10 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
-/* Used for IRQ vector to CPU mapping */
+/* Used for tracking CPU mapping attributes */
struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
- uint16_t irq;
uint16_t eq;
uint16_t hdwq;
uint16_t flag;
@@ -891,6 +918,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
+ struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
uint32_t conf_trunk;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b8aae31ffda3..9e5ff58edaca 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.4.0.0"
+#define LPFC_DRIVER_VERSION "12.6.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 9c5566217ef6..b5dde9d0d054 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6e788c02ff4..bd8184072bed 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -24,6 +24,8 @@
#define MEGASAS_VERSION "07.710.50.00-rc1"
#define MEGASAS_RELDATE "June 28, 2019"
+#define MEGASAS_MSIX_NAME_LEN 32
+
/*
* Device IDs
*/
@@ -2203,6 +2205,7 @@ struct megasas_aen_event {
};
struct megasas_irq_context {
+ char name[MEGASAS_MSIX_NAME_LEN];
struct megasas_instance *instance;
u32 MSIxIndex;
u32 os_irq;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 42cf38c1ea99..c40fbea06cc5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5546,9 +5546,11 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
+ snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
+ "megasas", instance->host->host_no);
if (request_irq(pci_irq_vector(pdev, 0),
instance->instancet->service_isr, IRQF_SHARED,
- "megasas", &instance->irq_context[0])) {
+ instance->irq_context->name, &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -5580,8 +5582,10 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
+ snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
+ "megasas", instance->host->host_no, i);
if (request_irq(pci_irq_vector(pdev, i),
- instance->instancet->service_isr, 0, "megasas",
+ instance->instancet->service_isr, 0, instance->irq_context[i].name,
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 50b8c1b12767..89c3685f5163 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -386,9 +386,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
- u64 blk, debugBlk;
+ u64 blk;
blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
- debugBlk = blk;
blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
*span_blk = blk;
@@ -699,9 +698,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
u32 logArm, rowMod, armQ, arm;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
/*Get row and span from io_info for Uneven Span IO.*/
@@ -801,9 +798,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u64 *pdBlock = &io_info->pdBlock;
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
row = mega_div64_32(stripRow, raid->rowDataSize);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index fea3cb6a090b..848fbec7bda6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3044,11 +3044,11 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
descp = NULL;
ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
- ioc->msix_vector_count);
+ ioc->reply_queue_count);
i = pci_alloc_irq_vectors_affinity(ioc->pdev,
ioc->high_iops_queues,
- ioc->msix_vector_count, irq_flags, descp);
+ ioc->reply_queue_count, irq_flags, descp);
return i;
}
@@ -4242,10 +4242,12 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
- Mpi2FWImageHeader_t *FWImgHdr;
+ Mpi2FWImageHeader_t *fw_img_hdr;
+ Mpi26ComponentImageHeader_t *cmp_img_hdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
int r = 0;
+ u32 package_version = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
u16 smid, ioc_status;
@@ -4302,14 +4304,26 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
- FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
- if (FWImgHdr->PackageVersion.Word) {
- ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
- FWImgHdr->PackageVersion.Struct.Major,
- FWImgHdr->PackageVersion.Struct.Minor,
- FWImgHdr->PackageVersion.Struct.Unit,
- FWImgHdr->PackageVersion.Struct.Dev);
- }
+ fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
+ if (le32_to_cpu(fw_img_hdr->Signature) ==
+ MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
+ cmp_img_hdr =
+ (Mpi26ComponentImageHeader_t *)
+ (fwpkg_data);
+ package_version =
+ le32_to_cpu(
+ cmp_img_hdr->ApplicationSpecific);
+ } else
+ package_version =
+ le32_to_cpu(
+ fw_img_hdr->PackageVersion.Word);
+ if (package_version)
+ ioc_info(ioc,
+ "FW Package Ver(%02d.%02d.%02d.%02d)\n",
+ ((package_version) & 0xFF000000) >> 24,
+ ((package_version) & 0x00FF0000) >> 16,
+ ((package_version) & 0x0000FF00) >> 8,
+ (package_version) & 0x000000FF);
} else {
_debug_dump_mf(&mpi_reply,
sizeof(Mpi2FWUploadReply_t)/4);
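
Rather than trusting a single header layout, the mpt3sas change normalizes
both the older image header and the MPI 2.6 component header into one packed
32-bit package_version and prints it one byte per field. A sketch of the byte
extraction with an assumed packed value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ver = 0x0c040200;	/* hypothetical packed version */

	printf("FW Package Ver(%02u.%02u.%02u.%02u)\n",
	       (unsigned)((ver >> 24) & 0xff),
	       (unsigned)((ver >> 16) & 0xff),
	       (unsigned)((ver >> 8) & 0xff),
	       (unsigned)(ver & 0xff));
	return 0;
}
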
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index faca0a5e71f8..4ebf81ea4d2f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "31.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 31
+#define MPT3SAS_DRIVER_VERSION "32.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 32
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -303,6 +303,8 @@ struct mpt3sas_nvme_cmd {
#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08)
+#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10)
/*
* HP HBA branding
@@ -391,9 +393,12 @@ struct Mpi2ManufacturingPage11_t {
u8 Reserved6; /* 2Fh */
__le32 Reserved7[7]; /* 30h - 4Bh */
u8 NVMeAbortTO; /* 4Ch */
- u8 Reserved8; /* 4Dh */
- u16 Reserved9; /* 4Eh */
- __le32 Reserved10[4]; /* 50h - 60h */
+ u8 NumPerDevEvents; /* 4Dh */
+ u8 HostTraceBufferDecrementSizeKB; /* 4Eh */
+ u8 HostTraceBufferFlags; /* 4Fh */
+ u16 HostTraceBufferMaxSizeKB; /* 50h */
+ u16 HostTraceBufferMinSizeKB; /* 52h */
+ __le32 Reserved10[2]; /* 54h - 5Bh */
};
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7d696952b376..6874cf017739 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -466,6 +466,13 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED))
continue;
+
+		/*
+		 * Add a log message to indicate the release.
+		 */
+		ioc_info(ioc,
+		    "%s: Releasing the trace buffer due to adapter reset.\n",
+		    __func__);
mpt3sas_send_diag_release(ioc, i, &issue_reset);
}
}
@@ -778,6 +785,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ if (!ioc->pcie_sg_lookup) {
+ dtmprintk(ioc, ioc_info(ioc,
+ "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
+ ));
+
+ if (ioc->logging_level & MPT_DEBUG_TM)
+ _debug_dump_mf(nvme_encap_request,
+ ioc->request_sz/4);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
/*
* Get the Physical Address of the sense buffer.
* Use Error Response buffer address field to hold the sense
@@ -1484,6 +1503,26 @@ _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
return rc;
}
+/**
+ * _ctl_diag_get_bufftype - return diag buffer type
+ * either TRACE, SNAPSHOT, or EXTENDED
+ * @ioc: per adapter object
+ * @unique_id: specifies the unique_id for the buffer
+ *
+ * returns MPT3_DIAG_UID_NOT_FOUND if the id is not found
+ */
+static u8
+_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
+{
+ u8 index;
+
+ for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
+ if (ioc->unique_id[index] == unique_id)
+ return index;
+ }
+
+ return MPT3_DIAG_UID_NOT_FOUND;
+}
/**
* _ctl_diag_register_2 - wrapper for registering diag buffer support
@@ -1531,11 +1570,88 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
return -EPERM;
}
+ if (diag_register->unique_id == 0) {
+ ioc_err(ioc,
+ "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
+ diag_register->unique_id, buffer_type);
+ return -EINVAL;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
+ __func__, buffer_type, ioc->unique_id[buffer_type]);
+ return -EINVAL;
+ }
+
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) {
- ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
- __func__, buffer_type);
- return -EINVAL;
+		/*
+		 * If the driver posts the buffer initially and an application
+		 * then wants to Register that buffer (own it) without
+		 * Releasing it first, the application's Register command MUST
+		 * carry the same buffer type and size (obtained from the
+		 * Query command); otherwise the Register command will fail.
+		 * If the application has released the buffer but wants to
+		 * re-register it, that is allowed as long as the
+		 * Unique-Id/Size match.
+		 */
+
+ if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
+ ioc->diag_buffer_sz[buffer_type] ==
+ diag_register->requested_buffer_size) {
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ dctlprintk(ioc, ioc_info(ioc,
+ "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
+ __func__, buffer_type,
+ ioc->unique_id[buffer_type],
+ diag_register->unique_id));
+
+ /*
+ * Application wants to own the buffer with
+ * the same size.
+ */
+ ioc->unique_id[buffer_type] =
+ diag_register->unique_id;
+ rc = 0; /* success */
+ goto out;
+ }
+ } else if (ioc->unique_id[buffer_type] !=
+ MPT3DIAGBUFFUNIQUEID) {
+ if (ioc->unique_id[buffer_type] !=
+ diag_register->unique_id ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size ||
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else {
+ ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+
+ if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size) {
+
+			ioc_err(ioc,
+			    "%s: a buffer is already allocated for buffer_type(0x%02x) of size %d bytes; please try registering again with the same size\n",
+			    __func__, buffer_type,
+			    ioc->diag_buffer_sz[buffer_type]);
+ return -EINVAL;
+ }
}
if (diag_register->requested_buffer_size % 4) {
@@ -1560,7 +1676,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
request_data = ioc->diag_buffer[buffer_type];
request_data_sz = diag_register->requested_buffer_size;
ioc->unique_id[buffer_type] = diag_register->unique_id;
- ioc->diag_buffer_status[buffer_type] = 0;
+ ioc->diag_buffer_status[buffer_type] &=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
memcpy(ioc->product_specific[buffer_type],
diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
@@ -1584,7 +1701,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
__func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
@@ -1649,9 +1767,12 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
out:
- if (rc && request_data)
+ if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
return rc;
@@ -1669,6 +1790,10 @@ void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
struct mpt3_diag_register diag_register;
+ u32 ret_val;
+ u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
+ u32 min_trace_buff_size = 0;
+ u32 decr_trace_buff_size = 0;
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
@@ -1677,10 +1802,68 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- /* register for 2MB buffers */
- diag_register.requested_buffer_size = 2 * (1024 * 1024);
- diag_register.unique_id = 0x7075900;
- _ctl_diag_register_2(ioc, &diag_register);
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
+
+ if (trace_buff_size != 0) {
+ diag_register.requested_buffer_size = trace_buff_size;
+ min_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
+ decr_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
+
+ if (min_trace_buff_size > trace_buff_size) {
+ /* The buff size is not set correctly */
+ ioc_err(ioc,
+ "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
+ min_trace_buff_size>>10,
+ trace_buff_size>>10);
+ ioc_err(ioc,
+ "Using zero Min Trace Buff Size\n");
+ min_trace_buff_size = 0;
+ }
+
+ if (decr_trace_buff_size == 0) {
+			/*
+			 * If no decrement size is given, fall back
+			 * to a single retry at the minimum size.
+			 */
+ decr_trace_buff_size =
+ trace_buff_size - min_trace_buff_size;
+ }
+ } else {
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ }
+
+ do {
+ ret_val = _ctl_diag_register_2(ioc, &diag_register);
+
+ if (ret_val == -ENOMEM && min_trace_buff_size &&
+ (trace_buff_size - decr_trace_buff_size) >=
+ min_trace_buff_size) {
+ /* adjust the buffer size */
+ trace_buff_size -= decr_trace_buff_size;
+ diag_register.requested_buffer_size =
+ trace_buff_size;
+ } else
+ break;
+ } while (true);
+
+ if (ret_val == -ENOMEM)
+ ioc_err(ioc,
+ "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
+ diag_register.requested_buffer_size>>10);
+ else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
+ & MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
}
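
Trace-buffer registration now starts at HostTraceBufferMaxSizeKB and, on
-ENOMEM, steps the request down by the decrement until it succeeds or falls
below the minimum. A userspace sketch of the same back-off loop with malloc()
and illustrative sizes:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_shrinking(size_t want, size_t min, size_t step,
			     size_t *got)
{
	void *p;

	while (want >= min) {
		p = malloc(want);
		if (p) {
			*got = want;
			return p;
		}
		if (!step || want - step < min)
			break;
		want -= step;	/* shrink and retry */
	}
	*got = 0;
	return NULL;
}

int main(void)
{
	size_t got;
	void *p = alloc_shrinking(1 << 20, 1 << 16, 1 << 18, &got);

	printf("allocated %zu bytes\n", got);
	free(p);
	return 0;
}
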
if (bits_to_register & 2) {
@@ -1723,6 +1906,12 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
rc = _ctl_diag_register_2(ioc, &karg);
+
+ if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ ioc->diag_buffer_status[karg.buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_APP_OWNED;
+
return rc;
}
@@ -1752,7 +1941,13 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -1785,12 +1980,21 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- request_data_sz = ioc->diag_buffer_sz[buffer_type];
- request_data_dma = ioc->diag_buffer_dma[buffer_type];
- dma_free_coherent(&ioc->pdev->dev, request_data_sz,
- request_data, request_data_dma);
- ioc->diag_buffer[buffer_type] = NULL;
- ioc->diag_buffer_status[buffer_type] = 0;
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+ ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_REGISTERED;
+ } else {
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ }
return 0;
}
@@ -1829,14 +2033,17 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EPERM;
}
- if ((ioc->diag_buffer_status[buffer_type] &
- MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
- __func__, buffer_type);
- return -EINVAL;
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
}
- if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id) {
if (karg.unique_id != ioc->unique_id[buffer_type]) {
ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
__func__, karg.unique_id);
@@ -1851,13 +2058,21 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID);
- else
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID |
- MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
+ karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED))
+ karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;
for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
karg.product_specific[i] =
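After this change the query path composes application_flags from the buffer's status bits instead of hard-coding APP_OWNED. Restated as a sketch, using the flag and status names defined in mpt3sas_ctl.h and shown above:

static u16 diag_app_flags(u32 status)
{
    u16 flags = 0;

    if (status & MPT3_DIAG_BUFFER_IS_REGISTERED)
        flags |= MPT3_APP_FLAGS_BUFFER_VALID;
    if (!(status & MPT3_DIAG_BUFFER_IS_RELEASED))
        flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
    if (!(status & MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
        flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
    if (status & MPT3_DIAG_BUFFER_IS_APP_OWNED)
        flags |= MPT3_APP_FLAGS_APP_OWNED;
    return flags;
}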
@@ -2002,7 +2217,13 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2026,7 +2247,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_DIAG_BUFFER_IS_RELEASED) {
ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
__func__, buffer_type);
- return 0;
+ return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
@@ -2086,7 +2307,13 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2210,6 +2437,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_RELEASED;
dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
@@ -3130,10 +3359,49 @@ host_trace_buffer_enable_store(struct device *cdev,
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
ioc_info(ioc, "posting host trace buffers\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- diag_register.requested_buffer_size = (1024 * 1024);
- diag_register.unique_id = 0x7075900;
+
+ if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
+ /* post the same buffer allocated previously */
+ diag_register.requested_buffer_size =
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
+ } else {
+ /*
+ * Free the diag buffer memory which was previously
+ * allocated by an application.
+ */
+ if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
+ &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[
+ MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[
+ MPI2_DIAG_BUF_TYPE_TRACE]);
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
+ NULL;
+ }
+
+ diag_register.requested_buffer_size = (1024 * 1024);
+ }
+
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
_ctl_diag_register_2(ioc, &diag_register);
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_info(ioc,
+ "Trace buffer %d KB allocated through sysfs\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
} else if (!strcmp(str, "release")) {
/* exit out if host buffers are already released */
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
@@ -3702,12 +3970,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!ioc->diag_buffer[i])
continue;
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
dma_free_coherent(&ioc->pdev->dev,
ioc->diag_buffer_sz[i],
ioc->diag_buffer[i],
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 18b46faef6f1..0f7aa4ddade0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -95,6 +95,14 @@
#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
struct mpt3_diag_read_buffer)
+/* Trace Buffer default UniqueId */
+#define MPT2DIAGBUFFUNIQUEID (0x07075900)
+#define MPT3DIAGBUFFUNIQUEID (0x4252434D)
+
+/* UID not found */
+#define MPT3_DIAG_UID_NOT_FOUND (0xFF)
+
+
/**
* struct mpt3_ioctl_header - main header structure
* @ioc_number - IOC unit number
@@ -310,6 +318,7 @@ struct mpt3_ioctl_btdh_mapping {
#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008)
/* flags for mpt3_diag_read_buffer */
#define MPT3_FLAGS_REREGISTER (0x0001)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c8e512ba6d39..a038be8a0e90 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5161,7 +5161,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -10193,6 +10193,8 @@ scsih_scan_start(struct Scsi_Host *shost)
int rc;
if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+ else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
+ mpt3sas_enable_diag_buffer(ioc, 1);
if (disable_discovery > 0)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index 6ac453fd5937..8ec9bab20ec4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -113,15 +113,21 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
u8 issue_reset = 0;
+ u32 *trig_data = (u32 *)&event_data->u.master;
dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
/* release the diag buffer trace */
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dTriggerDiagPrintk(ioc,
- ioc_info(ioc, "%s: release trace diag buffer\n",
- __func__));
+ /*
+  * add a log message so that the user knows which
+  * trigger event caused the release
+  */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer. Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n",
+ __func__, event_data->trigger_type,
+ trig_data[0], trig_data[1]);
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 3e0b8ebe257f..a920eced92ec 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1541,7 +1541,7 @@ out:
int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc;
struct mvs_tmf_task tmf_task;
tmf_task.tmf = TMF_ABORT_TASK_SET;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index e0b427fdf818..11a2cb844ecb 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1722,7 +1722,7 @@ struct ncb {
** Miscellaneous configuration and status parameters.
**----------------------------------------------------------------
*/
- u_char disc; /* Diconnection allowed */
+ u_char disc; /* Disconnection allowed */
u_char scsi_mode; /* Current SCSI BUS mode */
u_char order; /* Tag order to use */
u_char verbose; /* Verbosity for this controller*/
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 70db79254155..b6e04d14292d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1542,7 +1542,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
* with ACK reply when below condition is matched:
* MsgIn 00: Command Complete.
* MsgIn 02: Save Data Pointer.
- * MsgIn 04: Diconnect.
+ * MsgIn 04: Disconnect.
* In other case, unexpected BUSFREE is detected.
*/
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 2368f34efba3..dc9b74c9348a 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -32,7 +32,7 @@ config PCMCIA_FDOMAIN
config PCMCIA_NINJA_SCSI
tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
- depends on !64BIT
+ depends on !64BIT || COMPILE_TEST
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 97416e1dcc5b..93616f9fd6d7 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -56,9 +56,7 @@
MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
#include "nsp_io.h"
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 6b85016b4db3..7c6be2ec110d 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -70,6 +70,25 @@ static
DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
/**
+ * controller_fatal_error_show - check if the controller is in a fatal error state
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read only' shost attribute.
+ */
+static ssize_t controller_fatal_error_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->controller_fatal_error);
+}
+static DEVICE_ATTR_RO(controller_fatal_error);
+
+/**
* pm8001_ctl_fw_version_show - firmware version
* @cdev: pointer to embedded class device
* @buf: the buffer returned
@@ -804,6 +823,7 @@ static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_interface_rev,
+ &dev_attr_controller_fatal_error,
&dev_attr_fw_version,
&dev_attr_update_fw,
&dev_attr_aap_log,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 68a8217032d0..1cbcade747fe 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1336,10 +1336,13 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
* @circularQ: the inbound queue we want to transfer to HBA.
* @opCode: the operation code represents commands which LLDD and fw recognized.
* @payload: the command payload of each operation command.
+ * @nb: size in bytes of the command payload
+ * @responseQueue: queue to interrupt on with the command response (if any)
*/
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue)
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue)
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
@@ -1350,10 +1353,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
pm8001_printk("No free mpi buffer\n"));
return -ENOMEM;
}
- BUG_ON(!payload);
- /*Copy to the payload*/
- memcpy(pMessage, payload, (pm8001_ha->iomb_size -
- sizeof(struct mpi_msg_hdr)));
+
+ if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
+ nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr);
+ memcpy(pMessage, payload, nb);
+ if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size)
+ memset(pMessage + nb, 0, pm8001_ha->iomb_size -
+ (nb + sizeof(struct mpi_msg_hdr)));
/*Build the header*/
Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1364,7 +1370,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
/*Update the PI to the firmware*/
pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
circularQ->pi_offset, circularQ->producer_idx);
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
responseQueue, opCode, circularQ->producer_idx,
circularQ->consumer_index));
@@ -1436,6 +1442,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "outbound opcode msgheader:%x ci=%d pi=%d\n",
+ msgHeader_tmp, circularQ->consumer_idx,
+ circularQ->producer_index));
if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
@@ -1604,7 +1614,8 @@ void pm8001_work_fn(struct work_struct *work)
break;
default:
- pm8001_printk("...query task failed!!!\n");
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "...query task failed!!!\n"));
break;
});
@@ -1758,7 +1769,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
if (ret)
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1831,7 +1843,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1890,6 +1903,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("SAS Address of IO Failure Drive:"
"%016llx", SAS_ADDR(t->dev->sas_addr)));
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2072,7 +2090,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2125,7 +2143,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("port_id = %x,device_id = %x\n",
port_id, dev_id));
switch (event) {
@@ -2263,7 +2281,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2352,6 +2370,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2652,7 +2676,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2723,7 +2747,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, dev_id, tag, event));
switch (event) {
@@ -2872,7 +2896,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2917,9 +2941,13 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t = ccb->task;
ts = &t->task_status;
pm8001_dev = ccb->device;
- if (status)
+ if (status) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("smp IO status 0x%x\n", status));
+ PM8001_IOERR_DBG(pm8001_ha,
+ pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+ }
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
@@ -3070,7 +3098,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -3355,7 +3383,8 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3416,7 +3445,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -3463,7 +3492,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
" phy id = %d\n", port_id, phy_id));
port->port_state = portstate;
@@ -3541,7 +3570,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" phy Down and(default) = %x\n",
portstate));
break;
@@ -3689,7 +3718,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("No matched status = %d\n", status));
break;
}
@@ -3805,8 +3834,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("outbound queue HW event & event type : "));
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
+ port_id, phy_id, eventType, status));
switch (eventType) {
case HW_EVENT_PHY_START_STATUS:
PM8001_MSG_DBG(pm8001_ha,
@@ -3990,7 +4020,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type = %x\n", eventType));
break;
}
@@ -4161,7 +4191,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
opc));
break;
@@ -4284,7 +4314,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ &smp_cmd, sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
@@ -4352,7 +4382,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
+ sizeof(ssp_cmd), 0);
return ret;
}
@@ -4461,7 +4492,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
return ret;
}
@@ -4496,7 +4528,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memcpy(payload.sas_identify.sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4518,7 +4551,8 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4577,7 +4611,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -4598,7 +4633,8 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
payload.device_id = cpu_to_le32(device_id);
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("unregister device device_id = %d\n", device_id));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4621,7 +4657,8 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4649,6 +4686,9 @@ static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm8001_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm8001_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4672,7 +4712,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
task_abort.device_id = cpu_to_le32(dev_id);
task_abort.tag = cpu_to_le32(cmd_tag);
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
return ret;
}
@@ -4729,7 +4770,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
if (pm8001_ha->chip_id != chip_8001)
sspTMCmd.ds_ads_m = 0x08;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
+ sizeof(sspTMCmd), 0);
return ret;
}
@@ -4819,7 +4861,8 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4903,7 +4946,8 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4938,7 +4982,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4960,6 +5005,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "dma fw_control context input length :%x\n", fw_control->len));
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -5083,7 +5130,8 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.device_id = cpu_to_le32(pm8001_dev->device_id);
payload.nds = cpu_to_le32(state);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -5108,7 +5156,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
payload.SSAHOLT = cpu_to_le32(0xd << 25);
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3374f553c617..c6d9da7b546e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -41,6 +41,19 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_chips.h"
+#include "pm80xx_hwi.h"
+
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+module_param(logging_level, ulong, 0644);
+MODULE_PARM_DESC(logging_level, "bits for enabling logging info.");
+
+static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;
+module_param(link_rate, ulong, 0644);
+MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
+ " 1: Link rate 1.5G\n"
+ " 2: Link rate 3.0G\n"
+ " 4: Link rate 6.0G\n"
+ " 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
@@ -432,7 +445,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
pm8001_ha->io_mem[logicalBar].memsize = 0;
- pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
+ pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL;
}
logicalBar++;
}
@@ -466,7 +479,15 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- pm8001_ha->logging_level = 0x01;
+ pm8001_ha->logging_level = logging_level;
+ if (link_rate >= 1 && link_rate <= 15)
+ pm8001_ha->link_rate = (link_rate << 8);
+ else {
+ pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
+ LINKRATE_60 | LINKRATE_120;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Setting link rate to default value\n"));
+ }
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
/* IOMB size is 128 for 8088/89 controllers */
if (pm8001_ha->chip_id != chip_8001)
@@ -873,7 +894,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
u32 number_of_intr;
int flag = 0;
int rc;
- static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
@@ -894,14 +914,16 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
rc, pm8001_ha->number_of_intr));
for (i = 0; i < number_of_intr; i++) {
- snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
- DRV_NAME"%d", i);
+ snprintf(pm8001_ha->intr_drvname[i],
+ sizeof(pm8001_ha->intr_drvname[0]),
+ "%s-%d", pm8001_ha->name, i);
pm8001_ha->irq_vector[i].irq_id = i;
pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
pm8001_interrupt_handler_msix, flag,
- intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+ pm8001_ha->intr_drvname[i],
+ &(pm8001_ha->irq_vector[i]));
if (rc) {
for (j = 0; j < i; j++) {
free_irq(pci_irq_vector(pm8001_ha->pdev, i),
@@ -942,7 +964,7 @@ intx:
pm8001_ha->irq_vector[0].irq_id = 0;
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
- DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
+ pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
return rc;
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 7e48154e11c3..b7cbc312843e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -119,7 +119,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
&mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
- pm8001_printk("memory allocation error\n");
+ pr_err("pm80xx: memory allocation error\n");
return -1;
}
*pphys_addr = mem_dma_handle;
@@ -249,6 +249,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
+ PM8001_DEVIO_DBG(pm8001_ha,
+ pm8001_printk("func 0x%x\n", func));
rc = -EOPNOTSUPP;
}
msleep(300);
@@ -384,8 +386,9 @@ static int pm8001_task_exec(struct sas_task *task,
struct pm8001_port *port = NULL;
struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
- u32 tag = 0xdeadbeef, rc, n_elem = 0;
+ u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
unsigned long flags = 0;
+ enum sas_protocol task_proto = t->task_proto;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@@ -410,7 +413,7 @@ static int pm8001_task_exec(struct sas_task *task,
pm8001_dev = dev->lldd_dev;
port = &pm8001_ha->port[sas_find_local_port_id(dev)];
if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
- if (sas_protocol_ata(t->task_proto)) {
+ if (sas_protocol_ata(task_proto)) {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
@@ -432,7 +435,7 @@ static int pm8001_task_exec(struct sas_task *task,
goto err_out;
ccb = &pm8001_ha->ccb_info[tag];
- if (!sas_protocol_ata(t->task_proto)) {
+ if (!sas_protocol_ata(task_proto)) {
if (t->num_scatter) {
n_elem = dma_map_sg(pm8001_ha->dev,
t->scatter,
@@ -452,7 +455,7 @@ static int pm8001_task_exec(struct sas_task *task,
ccb->ccb_tag = tag;
ccb->task = t;
ccb->device = pm8001_dev;
- switch (t->task_proto) {
+ switch (task_proto) {
case SAS_PROTOCOL_SMP:
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
break;
@@ -469,8 +472,7 @@ static int pm8001_task_exec(struct sas_task *task,
break;
default:
dev_printk(KERN_ERR, pm8001_ha->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
+ "unknown sas_task proto: 0x%x\n", task_proto);
rc = -EINVAL;
break;
}
@@ -493,7 +495,7 @@ err_out_tag:
pm8001_tag_free(pm8001_ha, tag);
err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(t->task_proto))
+ if (!sas_protocol_ata(task_proto))
if (n_elem)
dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
@@ -1179,7 +1181,7 @@ int pm8001_query_task(struct sas_task *task)
break;
}
}
- pm8001_printk(":rc= %d\n", rc);
+ pr_err("pm80xx: rc= %d\n", rc);
return rc;
}
@@ -1202,8 +1204,8 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy_id = pm8001_dev->attached_phy;
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
+ ret = pm8001_find_tag(task, &tag);
+ if (ret == 0) {
pm8001_printk("no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
@@ -1241,26 +1243,50 @@ int pm8001_abort_task(struct sas_task *task)
/* 2. Send Phy Control Hard Reset */
reinit_completion(&completion);
+ phy->port_reset_status = PORT_RESET_TMO;
phy->reset_success = false;
phy->enable_completion = &completion;
phy->reset_completion = &completion_reset;
ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_HARD_RESET);
- if (ret)
- goto out;
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for local phy ctl\n"));
- wait_for_completion(&completion);
- if (!phy->reset_success)
+ if (ret) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
goto out;
+ }
- /* 3. Wait for Port Reset complete / Port reset TMO */
+ /* In the case of a reset timeout or failure we still
+ * abort the command at the firmware. The assumption
+ * here is that the drive is off doing something so
+ * that it's not processing requests, and we want to
+ * avoid getting a completion for this and either
+ * leaking the task in libsas or losing the race and
+ * getting a double free.
+ */
PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ ret = wait_for_completion_timeout(&completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret || !phy->reset_success) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
+ } else {
+ /* 3. Wait for Port Reset complete or
+ * Port reset TMO
+ */
+ PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
- wait_for_completion(&completion_reset);
- if (phy->port_reset_status) {
- pm8001_dev_gone_notify(dev);
- goto out;
+ ret = wait_for_completion_timeout(
+ &completion_reset,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ phy->reset_completion = NULL;
+ WARN_ON(phy->port_reset_status ==
+ PORT_RESET_TMO);
+ if (phy->port_reset_status == PORT_RESET_TMO) {
+ pm8001_dev_gone_notify(dev);
+ goto out;
+ }
}
/*
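The abort path above replaces two unbounded wait_for_completion() calls with wait_for_completion_timeout(), and detaches the on-stack completions whenever a wait fails, so a late interrupt cannot complete() a pointer that has gone out of scope. A condensed sketch of that pattern (trigger_phy_hard_reset() is a hypothetical stand-in for PM8001_CHIP_DISP->phy_ctl_req()):

static int reset_phy_bounded(struct pm8001_phy *phy)
{
    DECLARE_COMPLETION_ONSTACK(done);
    int ret;

    phy->enable_completion = &done;
    ret = trigger_phy_hard_reset(phy);  /* hypothetical helper */
    if (ret) {
        phy->enable_completion = NULL;  /* detach before unwinding */
        return ret;
    }
    if (!wait_for_completion_timeout(&done, PM8001_TASK_TIMEOUT * HZ)) {
        phy->enable_completion = NULL;  /* a late IRQ must not use it */
        return -ETIMEDOUT;
    }
    return 0;
}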
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index ff17c6aff63d..93438c8f67da 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -66,8 +66,11 @@
#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
-#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
- format, __func__, __LINE__, ## arg)
+#define PM8001_DEV_LOGGING 0x80 /* development message logging */
+#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
+#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
+#define pm8001_printk(format, arg...) pr_info("%s:: %s %d:" \
+ format, pm8001_ha->name, __func__, __LINE__, ## arg)
#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
do { \
if (unlikely(HBA->logging_level & LEVEL)) \
@@ -97,6 +100,14 @@ do { \
#define PM8001_MSG_DBG(HBA, CMD) \
PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
+#define PM8001_DEV_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD)
+
+#define PM8001_DEVIO_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD)
+
+#define PM8001_IOERR_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD)
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
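The three new logging classes plug into the existing PM8001_CHECK_LOGGING() gate: a message is emitted only when its class bit is set in the logging_level mask, which is now a module parameter. For example, loading with logging_level=0x300 enables IOERR (0x200) and DEVIO (0x100) messages, and a gated call expands roughly to:

PM8001_DEVIO_DBG(pm8001_ha,
    pm8001_printk("irq vec %d\n", vec));
/* ...is equivalent to: */
if (unlikely(pm8001_ha->logging_level & PM8001_DEVIO_LOGGING))
    pm8001_printk("irq vec %d\n", vec);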
@@ -141,6 +152,8 @@ struct pm8001_ioctl_payload {
#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN 0x18 /* TOTALLEN */
+#define MPI_FATAL_EDUMP_TABLE_SIGNATURE 0x1C /* SIGNATURE */
#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
@@ -496,6 +509,7 @@ struct pm8001_hba_info {
u32 forensic_last_offset;
u32 fatal_forensic_shift_offset;
u32 forensic_fatal_step;
+ u32 forensic_preserved_accumulated_transfer;
u32 evtlog_ib_offset;
u32 evtlog_ob_offset;
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
@@ -530,11 +544,14 @@ struct pm8001_hba_info {
struct pm8001_ccb_info *ccb_info;
#ifdef PM8001_USE_MSIX
int number_of_intr;/*will be used in remove()*/
+ char intr_drvname[PM8001_MAX_MSIX_VEC]
+ [PM8001_NAME_LENGTH+1+3+1];
#endif
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
#endif
u32 logging_level;
+ u32 link_rate;
u32 fw_status;
u32 smp_exp_mode;
bool controller_fatal_error;
@@ -663,7 +680,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue);
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 73261902d75d..19601138e889 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -37,6 +37,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*
*/
+ #include <linux/version.h>
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_hwi.h"
@@ -75,7 +76,7 @@ void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
destination1 = (u32 *)destination;
for (index = 0; index < dw_count; index += 4, destination1++) {
- offset = (soffset + index / 4);
+ offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
*destination1 = cpu_to_le32(value);
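The one-line change above (index / 4 becomes index) appears to fix the BAR read offset: pm8001_cr32() takes a byte offset and the loop consumes four bytes per u32 read, so stepping the offset by index / 4 re-read overlapping bytes. In sketch form (the >64K window-shift branch of the copy is omitted):

for (index = 0; index < dw_count; index += 4, destination1++) {
    offset = soffset + index;   /* byte offset: +4 per u32 read */
    if (offset < (64 * 1024)) {
        value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
        *destination1 = cpu_to_le32(value);
    }
    /* offsets beyond the 64K window are handled by shifting, not shown */
}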
@@ -92,9 +93,12 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
u32 accum_len , reg_val, index, *temp;
+ u32 status = 1;
unsigned long start;
u8 *direct_data;
char *fatal_error_data = buf;
+ u32 length_to_read;
+ u32 offset;
pm8001_ha->forensic_info.data_buf.direct_data = buf;
if (pm8001_ha->chip_id == chip_8001) {
@@ -104,16 +108,35 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* initialize variables for very first call from host application */
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
- pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ /* Write signature to fatal dump table */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
+ pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: status1 %d\n", status));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+ }
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
/* Program the MEMBASE II Shifting Register with 0x00.*/
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,30 +149,66 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
/* Read until accum_len is retrieved */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
- accum_len));
+ /* Determine length of data between previously stored transfer length
+ * and current accumulated transfer length
+ */
+ length_to_read =
+ accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n",
+ length_to_read));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: last_offset 0x%x\n",
+ pm8001_ha->forensic_last_offset));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+
+ /* If the accumulated length failed to read correctly, fail the attempt. */
if (accum_len == 0xFFFFFFFF) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Possible PCI issue 0x%x not expected\n",
- accum_len));
- return -EIO;
+ accum_len));
+ return status;
}
- if (accum_len == 0 || accum_len >= 0x100000) {
+ /* If the accumulated length is zero, fail the attempt */
+ if (accum_len == 0) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
- "%08x ", 0xFFFFFFFF);
+ "%08x ", 0xFFFFFFFF);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* Accumulated length is good so start capturing the first data */
temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
if (pm8001_ha->forensic_fatal_step == 0) {
moreData:
+ /* If the remaining data is less than SYSFS_OFFSET,
+  * reduce direct_len accordingly.
+  */
+ if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET
+ > length_to_read) {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ length_to_read -
+ pm8001_ha->forensic_last_offset;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ SYSFS_OFFSET;
+ }
if (pm8001_ha->forensic_info.data_buf.direct_data) {
/* Data is in bar, copy to host memory */
- pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
- pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
- pm8001_ha->forensic_info.data_buf.direct_len ,
- 1);
+ pm80xx_pci_mem_copy(pm8001_ha,
+ pm8001_ha->fatal_bar_loc,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+ pm8001_ha->forensic_info.data_buf.direct_len, 1);
}
pm8001_ha->fatal_bar_loc +=
pm8001_ha->forensic_info.data_buf.direct_len;
@@ -160,21 +219,29 @@ moreData:
pm8001_ha->forensic_info.data_buf.read_len =
pm8001_ha->forensic_info.data_buf.direct_len;
- if (pm8001_ha->forensic_last_offset >= accum_len) {
+ if (pm8001_ha->forensic_last_offset >= length_to_read) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 3);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_bar_loc = 0;
pm8001_ha->forensic_fatal_step = 1;
pm8001_ha->fatal_forensic_shift_offset = 0;
pm8001_ha->forensic_last_offset = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -184,12 +251,20 @@ moreData:
sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data
+ += sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -199,63 +274,122 @@ moreData:
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < 256; index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4) ; index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_forensic_shift_offset += 0x100;
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->fatal_bar_loc = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
if (pm8001_ha->forensic_fatal_step == 1) {
- pm8001_ha->fatal_forensic_shift_offset = 0;
- /* Read 64K of the debug data. */
- pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
- pm8001_ha->fatal_forensic_shift_offset);
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+ /* store previous accumulated length before triggering next
+ * accumulated length update
+ */
+ pm8001_ha->forensic_preserved_accumulated_transfer =
+ pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+
+ /* continue capturing the fatal log until Dump status is 0x3 */
+ if (pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS) <
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+
+ /* reset the fddstat bit by writing zero */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS, 0x0);
+
+ /* set dump control value to '1' so that new data will
+ * be transferred to shared memory
+ */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
MPI_FATAL_EDUMP_HANDSHAKE_RDY);
- /* Poll FDDHSHK until clear */
- start = jiffies + (2 * HZ); /* 2 sec */
+ /* Poll FDDHSHK until clear */
+ start = jiffies + (2 * HZ); /* 2 sec */
- do {
- reg_val = pm8001_mr32(fatal_table_address,
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
- } while ((reg_val) && time_before(jiffies, start));
+ } while ((reg_val) && time_before(jiffies, start));
- if (reg_val != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
- " = 0x%x\n", reg_val));
- return -EIO;
- }
-
- /* Read the next 64K of the debug data. */
- pm8001_ha->forensic_fatal_step = 0;
- if (pm8001_mr32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_STATUS) !=
- MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
- goto moreData;
- } else {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", 4);
- pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
- pm8001_ha->forensic_info.data_buf.direct_len = 0;
- pm8001_ha->forensic_info.data_buf.direct_offset = 0;
- pm8001_ha->forensic_info.data_buf.read_len = 0;
+ if (reg_val != 0) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ return((char *)
+ pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ }
+ /* Poll status register until set to 2 or
+ * 3 for up to 2 seconds
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS);
+ } while (((reg_val != 2) && (reg_val != 3)) &&
+ time_before(jiffies, start));
+
+ if (reg_val < 2) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0,
+ MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ }
+ /* Read the next block of the debug data.*/
+ length_to_read = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+ if (length_to_read != 0x0) {
+ pm8001_ha->forensic_fatal_step = 0;
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.read_len
+ = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len
+ = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset
+ = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ }
}
}
-
+ offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
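The reworked SPCv fatal-dump read is essentially a handshake loop: preserve the previously accumulated length, clear FDDTSTAT, set FDDHSHK, then poll the fatal table with a 2-second deadline before pulling the next chunk. The bounded-poll idiom, isolated as a sketch (the offset and wanted value vary by handshake step):

static int poll_fatal_table(struct pm8001_hba_info *pm8001_ha,
                            u32 offset, u32 wanted)
{
    unsigned long deadline = jiffies + 2 * HZ;  /* 2 sec budget */
    u32 val;

    do {
        val = pm8001_mr32(pm8001_ha->fatal_tbl_addr, offset);
        if (val == wanted)
            return 0;
    } while (time_before(jiffies, deadline));

    return -ETIMEDOUT;  /* caller fails the dump on timeout */
}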
@@ -317,6 +451,25 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
}
/**
@@ -521,6 +674,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressib, (offsetib + 0x18));
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
+ pm8001_ha->inbnd_q_tbl[i].pi_offset));
}
for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
@@ -549,6 +707,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressob, (offsetob + 0x18));
pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
+ pm8001_ha->outbnd_q_tbl[i].ci_offset));
}
}
@@ -582,6 +745,10 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Updated Fatal error interrupt vector 0x%x\n",
+ pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)));
+
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
@@ -591,6 +758,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Programming DW 0x21 in main cfg table with 0x%x\n",
+ pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)));
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
@@ -629,6 +799,21 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "CI upper base addr 0x%x CI lower base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr));
}
/**
@@ -652,6 +837,21 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "PI upper base addr 0x%x PI lower base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr));
}
/**
@@ -669,9 +869,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
- max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
- max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
udelay(1);
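
The loop above polls the inbound doorbell in 1 us steps, so the new named constants double as iteration counts (they are defined further down in pm80xx_hwi.h as 30 s and 15 s worth of polls). A minimal userspace sketch of the same bounded-poll pattern; read_doorbell() is a hypothetical stand-in for pm8001_cr32(), not the driver's code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 s of 1 us polls */

static uint32_t doorbell = 1; /* fake register; "firmware" clears it below */
static uint32_t polls;

static uint32_t read_doorbell(void) /* hypothetical stand-in for pm8001_cr32() */
{
        if (++polls > 1000) /* pretend FW consumes the doorbell eventually */
                doorbell = 0;
        return doorbell;
}

static bool wait_doorbell_clear(uint32_t mask, uint32_t max_wait_count)
{
        do {
                /* udelay(1) here in the driver; omitted in this model */
                if (!(read_doorbell() & mask))
                        return true; /* firmware consumed the doorbell */
        } while (--max_wait_count);
        return false; /* timed out */
}

int main(void)
{
        printf("cleared: %d\n",
               wait_doorbell_clear(0x1, SPCV_DOORBELL_CLEAR_TIMEOUT));
        return 0;
}
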
@@ -797,7 +997,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
- PM8001_INIT_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
offset, value));
pcilogic = (value & 0xFC000000) >> 26;
@@ -885,7 +1085,12 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
(THERMAL_ENABLE << 8) | page_code;
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
+ payload.cfg_pg[0], payload.cfg_pg[1]));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
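
This hunk is the first of many in this patch that add a buffer-size argument to pm8001_mpi_build_cmd(), letting the queue write be bounded by the caller's payload instead of assuming a full queue element. A hedged sketch of the idea; build_cmd_slot() and IQ_ELEMENT_SIZE are illustrative names, not the driver's:

#include <stdint.h>
#include <string.h>

#define IQ_ELEMENT_SIZE 128u /* assumed slot size, for illustration only */

/* Copy at most the caller's payload into a fixed-size queue slot and
 * zero-fill the rest, instead of blindly copying a full element. */
static void build_cmd_slot(uint8_t *slot, const void *payload, size_t nb)
{
        size_t n = nb < IQ_ELEMENT_SIZE ? nb : IQ_ELEMENT_SIZE;

        memcpy(slot, payload, n);
        memset(slot + n, 0, IQ_ELEMENT_SIZE - n);
}

int main(void)
{
        uint8_t slot[IQ_ELEMENT_SIZE];
        uint32_t payload[4] = { 0xdeadbeef, 1, 2, 3 };

        build_cmd_slot(slot, payload, sizeof(payload));
        return 0;
}
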
@@ -967,7 +1172,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1090,7 +1296,12 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
KEK_MGMT_SUBOP_KEYCARDUPDATE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Saving Encryption info to flash. payload 0x%x\n",
+ payload.new_curidx_ksop));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1241,7 +1452,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
pm8001_printk("reset register before write : 0x%x\n", regval));
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
- mdelay(500);
+ msleep(500);
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
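
The mdelay()-to-msleep() conversions in this patch trade a busy-wait for a scheduled sleep; both call sites run in process context, where sleeping is allowed. A userspace analogy of the difference (timings illustrative):

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static void busy_wait_ms(long ms) /* analogous to mdelay(): burns CPU */
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000L +
                 (now.tv_nsec - start.tv_nsec) / 1000000L < ms);
}

static void sleep_ms(long ms) /* analogous to msleep(): yields CPU */
{
        struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

        nanosleep(&ts, NULL);
}

int main(void)
{
        busy_wait_ms(5); /* spins at 100% CPU for the interval */
        sleep_ms(5);     /* sleeps; the scheduler runs other work */
        puts("done");
        return 0;
}
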
@@ -1443,7 +1654,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Executing abort task end\n"));
if (ret) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1519,7 +1733,9 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n"));
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1570,6 +1786,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
@@ -1772,7 +1992,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -1826,7 +2046,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -1963,7 +2183,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
@@ -1974,7 +2194,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2062,6 +2282,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (unlikely(status))
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2365,7 +2591,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2382,6 +2608,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -2435,7 +2663,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -2655,6 +2883,9 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
+ PM8001_DEV_DBG(pm8001_ha,
+ pm8001_printk("tag::0x%x status::0x%x\n", tag, status));
+
switch (status) {
case IO_SUCCESS:
@@ -2822,7 +3053,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -2873,7 +3104,8 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0xFF) << 24) | (port_id & 0xFF));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -2964,7 +3196,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -2984,7 +3216,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
if (pm8001_ha->flags == PM8001F_RUN_TIME)
- mdelay(200);/*delay a moment to wait disk to spinup*/
+ msleep(200);/*delay a moment to wait disk to spinup*/
pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
@@ -3013,7 +3245,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port id %d, phy id %d link_rate %d portstate 0x%x\n",
port_id, phy_id, link_rate, portstate));
@@ -3101,7 +3333,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" Phy Down and(default) = 0x%x\n",
portstate));
break;
@@ -3130,8 +3362,10 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (status == 0) {
phy->phy_state = PHY_LINK_DOWN;
if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
+ phy->enable_completion != NULL) {
complete(phy->enable_completion);
+ phy->enable_completion = NULL;
+ }
}
return 0;
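
Clearing enable_completion after complete() makes the wakeup fire-once, guarding against a second PHY_START response completing a stale waiter. A kernel-context sketch of the guard; the struct and function names are abbreviations, not the driver's:

#include <linux/completion.h>

struct phy_state {
        struct completion *enable_completion;
};

static void phy_start_done(struct phy_state *phy)
{
        if (phy->enable_completion != NULL) {
                complete(phy->enable_completion);
                phy->enable_completion = NULL; /* arm-once: no double complete() */
        }
}
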
@@ -3191,7 +3425,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
port_id, phy_id, eventType, status));
@@ -3376,7 +3610,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type 0x%x\n", eventType));
break;
}
@@ -3758,7 +3992,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
ssp_coalesced_comp_resp(pm8001_ha, piomb);
break;
default:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
break;
}
@@ -3991,8 +4225,8 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
+ sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
return 0;
@@ -4200,7 +4434,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &ssp_cmd, q_index);
+ &ssp_cmd, sizeof(ssp_cmd), q_index);
return ret;
}
@@ -4441,7 +4675,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, q_index);
+ &sata_cmd, sizeof(sata_cmd), q_index);
return ret;
}
@@ -4465,23 +4699,9 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
- /*
- ** [0:7] PHY Identifier
- ** [8:11] link rate 1.5G, 3G, 6G
- ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
- ** [14] 0b disable spin up hold; 1b enable spin up hold
- ** [15] ob no change in current PHY analig setup 1b enable using SPAST
- */
- if (!IS_SPCV_12G(pm8001_ha->pdev))
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | phy_id);
- else
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
- phy_id);
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
/* SSC Disable and SAS Analog ST configuration */
/**
payload.ase_sh_lm_slr_phyid =
@@ -4494,9 +4714,10 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
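
The removed comment documents the PHY START dword layout: [0:7] PHY id, [8:11] link-rate bits, [12:13] link mode, [14] spin-up hold, [15] analog setup. With pm8001_ha->link_rate computed once at init time, the SPC and SPCv 12G branches collapse into one expression. A standalone sketch of the field packing; the flag values below are illustrative placeholders, not the pm80xx_hwi.h encodings:

#include <stdint.h>
#include <stdio.h>

#define SPINHOLD_DISABLE (0x0 << 14) /* illustrative encodings, not */
#define LINKMODE_AUTO    (0x3 << 12) /* the pm80xx_hwi.h values     */
#define LINKRATE_15      (0x1 << 8)
#define LINKRATE_30      (0x2 << 8)
#define LINKRATE_60      (0x4 << 8)

int main(void)
{
        uint8_t phy_id = 2;
        uint32_t link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60;
        uint32_t dw = SPINHOLD_DISABLE | LINKMODE_AUTO | link_rate | phy_id;

        printf("ase_sh_lm_slr_phyid = 0x%08x\n", dw);
        return 0;
}
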
@@ -4518,7 +4739,8 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4584,7 +4806,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -4614,7 +4837,8 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4641,6 +4865,9 @@ static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm80xx_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4669,7 +4896,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
}
@@ -4711,7 +4939,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
for (i = 0; i < length; i++)
payload.reserved[i] = cpu_to_le32(*(buf + i));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index dc9ab7689060..701951a0f715 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -220,6 +220,9 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
+#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 sec */
+#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 1000 * 1000) /* 15 sec */
+
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index d979f095aeda..2386bfb73c46 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -42,7 +42,7 @@ extern uint qedf_debug;
#define QEDF_LOG_LPORT 0x4000 /* lport logs */
#define QEDF_LOG_ELS 0x8000 /* ELS logs */
#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDF_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDF_LOG_TID 0x80000 /*
* FW TID context acquire
* free
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 59ca98f12afd..604856e72cfb 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1926,6 +1926,13 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
return 0;
}
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_port_id(shost) = lport->port_id;
+}
+
static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
*shost)
{
@@ -1996,6 +2003,7 @@ static struct fc_function_template qedf_fc_transport_fn = {
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
+ .get_host_port_id = qedf_get_host_port_id,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fc_get_host_speed,
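
Wiring up .get_host_port_id lets the FC transport class refresh fc_host_port_id() from the live lport on every sysfs read, rather than serving a value cached at link-up. A minimal sketch of the pattern (kernel context; the example_* names are placeholders):

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/libfc.h>

static void example_get_host_port_id(struct Scsi_Host *shost)
{
        struct fc_lport *lport = shost_priv(shost);

        fc_host_port_id(shost) = lport->port_id; /* copy the live value */
}

static struct fc_function_template example_fc_template = {
        .get_host_port_id  = example_get_host_port_id,
        .show_host_port_id = 1, /* sysfs attribute; transport calls the getter */
};
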
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 243acc8b520a..37d084086fd4 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -44,7 +44,7 @@ extern uint qedi_dbg_log;
#define QEDI_LOG_LPORT 0x4000 /* lport logs */
#define QEDI_LOG_ELS 0x8000 /* ELS logs */
#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDI_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
* free
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7259bce85e0e..ae97e2f310a3 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -102,8 +102,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla8044_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
- } else
+ } else {
+ ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
+ }
break;
case 4:
if (IS_P3P_TYPE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6ffa9877c28b..460f443f6471 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -591,19 +591,23 @@ typedef struct srb {
*/
uint8_t cmd_type;
uint8_t pad[3];
- atomic_t ref_count;
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+
uint32_t handle;
uint16_t flags;
uint16_t type;
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct srb *cmd_sp;
struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
@@ -2277,7 +2281,7 @@ typedef struct {
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
uint8_t fc4_type;
- uint8_t fc4f_nvme; /* nvme fc4 feature bits */
+ uint8_t fc4_features;
} sw_info_t;
/* FCP-4 types */
@@ -2445,7 +2449,7 @@ typedef struct fc_port {
u32 supported_classes;
uint8_t fc4_type;
- uint8_t fc4f_nvme;
+ uint8_t fc4_features;
uint8_t scan_state;
unsigned long last_queue_full;
@@ -2476,6 +2480,11 @@ typedef struct fc_port {
u16 n2n_chip_reset;
} fc_port_t;
+enum {
+ FC4_PRIORITY_NVME = 1,
+ FC4_PRIORITY_FCP = 2,
+};
+
#define QLA_FCPORT_SCAN 1
#define QLA_FCPORT_FOUND 2
@@ -4291,6 +4300,8 @@ struct qla_hw_data {
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
+ uint8_t fc4_type_priority;
+
atomic_t zio_threshold;
uint16_t last_zio_threshold;
@@ -4816,6 +4827,23 @@ struct sff_8247_a0 {
ha->current_topology == ISP_CFG_N || \
!ha->current_topology)
+#define NVME_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_NVME) \
+
+#define FCP_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_FCP) \
+
+#define NVME_ONLY_TARGET(fcport) \
+ (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \
+
+#define NVME_FCP_TARGET(fcport) \
+ (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
+
+#define NVME_TARGET(ha, fcport) \
+ ((NVME_FCP_TARGET(fcport) && \
+ (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
+ NVME_ONLY_TARGET(fcport)) \
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 732bb871c433..59f6903e5abe 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -2101,4 +2101,6 @@ struct qla_fcp_prio_cfg {
#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4)
#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4)
+#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d11416dcee4e..5b163ad85c34 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -917,4 +917,5 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5298ed10059f..67230688b05e 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -248,7 +248,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
WWN_SIZE);
fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
- FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+ FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
@@ -2887,7 +2887,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
- uint8_t fcp_scsi_features = 0;
+ uint8_t fcp_scsi_features = 0, nvme_features = 0;
struct ct_arg arg;
for (i = 0; i < ha->max_fibre_devices; i++) {
@@ -2933,14 +2933,19 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fcp_scsi_features &= 0x0f;
- if (fcp_scsi_features)
- list[i].fc4_type = FC4_TYPE_FCP_SCSI;
- else
- list[i].fc4_type = FC4_TYPE_OTHER;
+ if (fcp_scsi_features) {
+ list[i].fc4_type = FS_FC4TYPE_FCP;
+ list[i].fc4_features = fcp_scsi_features;
+ }
- list[i].fc4f_nvme =
+ nvme_features =
ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- list[i].fc4f_nvme &= 0xf;
+ nvme_features &= 0xf;
+
+ if (nvme_features) {
+ list[i].fc4_type |= FS_FC4TYPE_NVME;
+ list[i].fc4_features = nvme_features;
+ }
}
/* Last device exit. */
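
GFF_ID returns one 4-bit feature field per FC-4 type; the driver samples the FCP and NVMe slots, masks to the low nibble, and now records both capabilities in fc4_type instead of letting one overwrite the other. A sketch of the decode; the two offsets below are assumed placeholders, not verified against the driver's GFF_* defines:

#include <stdint.h>
#include <stdio.h>

#define GFF_FCP_SCSI_OFFSET 32  /* assumed position of type 08h, unverified */
#define GFF_NVME_OFFSET     161 /* assumed position of type 28h, unverified */

int main(void)
{
        uint8_t fc4_features[256] = { 0 };
        uint8_t fc4_type = 0;

        fc4_features[GFF_FCP_SCSI_OFFSET] = 0x0a; /* sample response bytes */
        fc4_features[GFF_NVME_OFFSET]     = 0x05;

        if (fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf)
                fc4_type |= 1 << 0; /* FS_FC4TYPE_FCP-style bit */
        if (fc4_features[GFF_NVME_OFFSET] & 0xf)
                fc4_type |= 1 << 1; /* FS_FC4TYPE_NVME-style bit */

        printf("fc4_type bits: 0x%x\n", fc4_type); /* both set: 0x3 */
        return 0;
}
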
@@ -3005,7 +3010,7 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == QLA_FUNCTION_TIMEOUT)
- return;
+ goto done;
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3435,6 +3440,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
struct event_arg ea;
+ uint8_t fc4_scsi_feat;
+ uint8_t fc4_nvme_feat;
ql_dbg(ql_dbg_disc, vha, 0x2133,
"Async done-%s res %x ID %x. %8phC\n",
@@ -3442,24 +3449,25 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fcport->flags &= ~FCF_ASYNC_SENT;
ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
* The format of the FC-4 Features object, as defined by the FC-4,
* Shall be an array of 4-bit values, one for each type code value
*/
if (!res) {
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+ if (fc4_scsi_feat & 0xf) {
/* w1 b00:03 */
- fcport->fc4_type =
- ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
- fcport->fc4_type &= 0xf;
- }
+ fcport->fc4_type = FS_FC4TYPE_FCP;
+ fcport->fc4_features = fc4_scsi_feat & 0xf;
+ }
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+ if (fc4_nvme_feat & 0xf) {
/* w5 [00:03]/28h */
- fcport->fc4f_nvme =
- ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- fcport->fc4f_nvme &= 0xf;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ fcport->fc4_features = fc4_nvme_feat & 0xf;
}
}
@@ -3563,7 +3571,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
u8 recheck = 0;
u16 dup = 0, dup_cnt = 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
@@ -3580,8 +3588,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Fabric scan failed on all retries.\n");
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: Fabric scan failed for %d retries.\n",
+ __func__, vha->scan.scan_retry);
}
goto out;
}
@@ -4047,7 +4056,7 @@ done_free_sp:
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
@@ -4061,7 +4070,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
u32 rspsz;
unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (!vha->flags.online)
@@ -4070,14 +4079,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags & SF_SCANNING) {
spin_unlock_irqrestore(&vha->work_lock, flags);
- ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: scan active\n", __func__);
return rval;
}
vha->scan.scan_flags |= SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
if (fc4_type == FC4_TYPE_FCP_SCSI) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Performing FCP Scan\n", __func__);
if (sp)
@@ -4132,7 +4142,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
}
sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s scan list size %d\n", __func__, vha->scan.size);
memset(vha->scan.l, 0, vha->scan.size);
@@ -4197,8 +4207,8 @@ done_free_sp:
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
if (vha->scan.scan_flags == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s: schedule\n", __func__);
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: Scan scheduled.\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
schedule_delayed_work(&vha->scan.scan_work, 5);
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1d041313ec52..1dbee8800218 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,7 +17,6 @@
#include <asm/prom.h>
#endif
-#include <target/target_core_base.h>
#include "qla_target.h"
/*
@@ -101,8 +100,22 @@ static void qla24xx_abort_iocb_timeout(void *data)
u32 handle;
unsigned long flags;
+ if (sp->cmd_sp)
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
+ sp->cmd_sp->handle, sp->cmd_sp->type,
+ sp->handle, sp->type);
+ else
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout 2 - hdl=%x, type=%x\n",
+ sp->handle, sp->type);
+
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+ if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
+ sp->cmd_sp))
+ qpair->req->outstanding_cmds[handle] = NULL;
+
/* removing the abort */
if (qpair->req->outstanding_cmds[handle] == sp) {
qpair->req->outstanding_cmds[handle] = NULL;
@@ -111,6 +124,9 @@ static void qla24xx_abort_iocb_timeout(void *data)
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ if (sp->cmd_sp)
+ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+
abt->u.abt.comp_status = CS_TIMEOUT;
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
@@ -142,6 +158,7 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
sp->type = SRB_ABT_CMD;
sp->name = "abort";
sp->qpair = cmd_sp->qpair;
+ sp->cmd_sp = cmd_sp;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
@@ -328,7 +345,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
else
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x2072,
@@ -726,19 +743,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
current_login_state = e->current_login_state >> 4;
else
current_login_state = e->current_login_state & 0xf;
-
ql_dbg(ql_dbg_disc, vha, 0x20e2,
- "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
__func__, fcport->port_name,
e->current_login_state, fcport->fw_login_state,
- fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+ fcport->fc4_type, id.b24, fcport->d_id.b24,
+ loop_id, fcport->loop_id);
switch (fcport->disc_state) {
case DSC_DELETE_PEND:
@@ -1135,19 +1150,18 @@ static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- if (res == QLA_FUNCTION_TIMEOUT) {
- dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
- sp->u.iocb_cmd.u.mbx.in_dma);
- return;
- }
-
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+ if (res == QLA_FUNCTION_TIMEOUT)
+ goto done;
+
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
ea.sp = sp;
qla24xx_handle_gpdb_event(vha, &ea);
+done:
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
@@ -1225,13 +1239,13 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x211b,
"Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
- fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
+ fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc");
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -1382,14 +1396,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
- fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
- ea->rc);
+ "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
+ fcport->port_name, fcport->disc_state, pd->current_login_state,
+ fcport->fc4_type, ea->rc);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
ls = pd->current_login_state >> 4;
else
ls = pd->current_login_state & 0xf;
@@ -1578,7 +1592,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post %s PRLI\n",
__func__, __LINE__, fcport->port_name,
- fcport->fc4f_nvme ? "NVME" : "FC");
+ NVME_TARGET(vha->hw, fcport) ? "NVME" :
+ "FC");
qla24xx_post_prli_work(vha, fcport);
}
break;
@@ -1701,6 +1716,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
}
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post PRLI\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+}
+
/*
* RSCN(s) came in for this fcport, but the RSCN(s) was not able
* to be consumed by the fcport
@@ -1860,38 +1884,26 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
break;
}
- if (ea->fcport->fc4f_nvme) {
+ /*
+ * Retry PRLI with other FC-4 type if failure occurred on dual
+ * FCP/NVMe port
+ */
+ if (NVME_FCP_TARGET(ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post fc4 prli\n",
- __func__, __LINE__, ea->fcport->port_name);
- ea->fcport->fc4f_nvme = 0;
- qla24xx_post_prli_work(vha, ea->fcport);
- return;
+ "%s %d %8phC post %s prli\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
+ "NVMe" : "FCP");
+ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ else
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
}
- /* at this point both PRLI NVME & PRLI FCP failed */
- if (N2N_TOPO(vha->hw)) {
- if (ea->fcport->n2n_link_reset_cnt < 3) {
- ea->fcport->n2n_link_reset_cnt++;
- /*
- * remote port is not sending Plogi. Reset
- * link to kick start his state machine
- */
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- } else {
- ql_log(ql_log_warn, vha, 0x2119,
- "%s %d %8phC Unable to reconnect\n",
- __func__, __LINE__, ea->fcport->port_name);
- }
- } else {
- /*
- * switch connect. login failed. Take connection
- * down and allow relogin to retrigger
- */
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->keep_nport_handle = 0;
- qlt_schedule_sess_for_deletion(ea->fcport);
- }
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
}
}
@@ -1952,7 +1964,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- if (ea->fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2117,
"%s %d %8phC post prli\n",
__func__, __LINE__, ea->fcport->port_name);
@@ -2206,8 +2218,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x0061,
"Configure NVRAM parameters...\n");
+ /* Let priority default to FCP, can be overridden by nvram_config */
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
ha->isp_ops->nvram_config(vha);
+ if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
+ ha->fc4_type_priority != FC4_PRIORITY_NVME)
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
+ ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
+ ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
+
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
ql_log(ql_log_info, vha, 0x0077,
@@ -5382,7 +5404,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_iidma_fcport(vha, fcport);
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
fcport->disc_state = DSC_LOGIN_COMPLETE;
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
@@ -5710,11 +5732,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
new_fcport->fc4_type = swl[swl_idx].fc4_type;
new_fcport->nvme_flag = 0;
- new_fcport->fc4f_nvme = 0;
if (vha->flags.nvme_enabled &&
- swl[swl_idx].fc4f_nvme) {
- new_fcport->fc4f_nvme =
- swl[swl_idx].fc4f_nvme;
+ swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
ql_log(ql_log_info, vha, 0x2131,
"FOUND: NVME port %8phC as FC Type 28h\n",
new_fcport->port_name);
@@ -5770,7 +5789,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
if (ql2xgffidenable &&
- (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+ (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
@@ -5839,7 +5858,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
fcport->disc_state = DSC_GNL;
vha->fcport_count--;
@@ -8514,6 +8533,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* N2N: driver will initiate Login instead of FW */
icb->firmware_options_3 |= BIT_8;
+ /* Determine NVMe/FCP priority for target ports */
+ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -9003,8 +9025,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
struct qla_hw_data *ha = qpair->hw;
qpair->delete_in_progress = 1;
- while (atomic_read(&qpair->ref_count))
- msleep(500);
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0c3d907af769..352aba4127f7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -307,3 +307,13 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
WRT_REG_DWORD(req->req_q_in, req->ring_index);
}
+
+static inline int
+qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
+{
+ uint32_t data;
+
+ data = ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];
+
+ return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
+}
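
qla2xxx_get_fc4_priority() reduces to a single-bit test: bit 6 of the NVRAM flag byte set means FCP keeps priority, clear selects NVMe. A quick standalone check with illustrative bytes:

#include <stdint.h>
#include <stdio.h>

enum { FC4_PRIORITY_NVME = 1, FC4_PRIORITY_FCP = 2 };

static int fc4_priority(uint8_t nvram_flag)
{
        return (nvram_flag >> 6) & 1 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}

int main(void)
{
        printf("0x40 -> %s\n",
               fc4_priority(0x40) == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
        printf("0x00 -> %s\n",
               fc4_priority(0x00) == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
        return 0;
}
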
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 518eb954cf42..b25f87ff8cde 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2740,6 +2740,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct scsi_qla_host *vha = sp->vha;
struct event_arg ea;
struct qla_work_evt *e;
+ struct fc_port *conflict_fcport;
+ port_id_t cid; /* conflict Nport id */
+ u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
"%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
@@ -2751,14 +2755,101 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- if (res) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
+ switch (fw_status[0]) {
+ case CS_DATA_UNDERRUN:
+ case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
- ea.data[0] = MBS_COMMAND_COMPLETE;
- ea.sp = sp;
- qla24xx_handle_plogi_done_event(vha, &ea);
+ ea.rc = res;
+ qla_handle_els_plogi_done(vha, &ea);
+ break;
+
+ case CS_IOCB_ERROR:
+ switch (fw_status[1]) {
+ case LSC_SCODE_PORTID_USED:
+ lid = fw_status[2] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(fcport->port_name),
+ fcport->d_id, lid, &conflict_fcport);
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same
+ * loop_id & nport id; conflict
+ * fcport needs to finish cleanup
+ * before this fcport can proceed
+ * to login.
+ */
+ conflict_fcport->conflict = fcport;
+ fcport->login_pause = 1;
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ fcport->loop_id = lid;
+ fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ break;
+
+ case LSC_SCODE_NPORT_USED:
+ cid.b.domain = (fw_status[2] >> 16) & 0xff;
+ cid.b.area = (fw_status[2] >> 8) & 0xff;
+ cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.rsvd_1 = 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, cid.b24);
+ set_bit(fcport->loop_id,
+ vha->hw->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_post_gnl_work(vha, fcport);
+ break;
+
+ case LSC_SCODE_NOXCB:
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xd046,
+ "Exchange starvation. Resetting RISC\n");
+ vha->hw->exch_starvation = 0;
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ /* fall through */
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
}
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
@@ -2792,11 +2883,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
return -ENOMEM;
}
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_PEND;
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 009fd5a33fcd..1b8f297449cf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1227,11 +1227,32 @@ global_port_update:
break;
case MBA_IDC_AEN:
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
- qla83xx_handle_8200_aen(vha, mb);
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->flags.fw_init_done = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+ "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) ||
+ (mb[2] & BIT_8)) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "MPI Heartbeat stop. FW dump needed\n");
+ ha->fw_dump_mpi = 1;
+ ha->isp_ops->fw_dump(vha, 1);
+ }
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (IS_QLA83XX(ha)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[5] = RD_REG_WORD(&reg24->mailbox5);
+ mb[6] = RD_REG_WORD(&reg24->mailbox6);
+ mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ qla83xx_handle_8200_aen(vha, mb);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+ }
break;
case MBA_DPORT_DIAGNOSTICS:
@@ -2466,6 +2487,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ if (sp->abort)
+ sp->aborted = 1;
+ else
+ sp->completed = 1;
+
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
ql_dbg(ql_dbg_io, vha, 0x3015,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 4a1f21c11758..0cf94f05f008 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1932,7 +1932,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(ha, fcport)) {
current_login_state = pd24->current_login_state >> 4;
last_login_state = pd24->last_login_state >> 4;
} else {
@@ -3899,8 +3899,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
+ fcport->fc4_type = FS_FC4TYPE_FCP;
if (vha->flags.nvme_enabled)
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
switch (fcport->disc_state) {
case DSC_DELETED:
@@ -6287,17 +6288,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
case QLA_SUCCESS:
ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
__func__, sp->name);
- sp->free(sp);
break;
default:
ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- sp->free(sp);
break;
}
- return rval;
-
done_free_sp:
sp->free(sp);
done:
@@ -6362,7 +6359,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
uint64_t zero = 0;
u8 current_login_state, last_login_state;
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
current_login_state = pd->current_login_state >> 4;
last_login_state = pd->last_login_state >> 4;
} else {
@@ -6397,8 +6394,8 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
- if (fcport->fc4f_nvme) {
- fcport->port_type = 0;
+ if (NVME_TARGET(vha->hw, fcport)) {
+ fcport->port_type = FCT_NVME;
if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
fcport->port_type |= FCT_NVME_INITIATOR;
if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 238240984bc1..eabc5127174e 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -946,7 +946,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
if (!sp)
- goto done;
+ return rval;
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
@@ -962,7 +962,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
ql_dbg(ql_dbg_async, vha, 0xffff,
"%s: %s Failed submission. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ goto done;
}
ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
@@ -980,16 +980,13 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
case QLA_SUCCESS:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
__func__, sp->name);
- goto done_free_sp;
+ break;
default:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ break;
}
done:
- return rval;
-
-done_free_sp:
sp->free(sp);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 6cc19e060afc..941aa53363f5 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -224,8 +224,8 @@ static void qla_nvme_abort_work(struct work_struct *work)
if (ha->flags.host_shutting_down) {
ql_log(ql_log_info, sp->fcport->vha, 0xffff,
- "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
- __func__, sp, sp->type, atomic_read(&sp->ref_count));
+ "%s Calling done on sp: %p, type: 0x%x\n",
+ __func__, sp, sp->type);
sp->done(sp, 0);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 726ad4cbf4a6..8b84bc4a6ac8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -698,11 +698,6 @@ void qla2x00_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -794,11 +789,6 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -903,7 +893,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
+
CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
@@ -985,18 +975,16 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
CMD_SP(cmd) = (void *)sp;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
- sp->qpair = qpair;
rval = ha->isp_ops->start_scsi_mq(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
if (rval == QLA_INTERFACE_ERROR)
- goto qc24_fail_command;
+ goto qc24_free_sp_fail_command;
goto qc24_host_busy_free_sp;
}
@@ -1008,6 +996,11 @@ qc24_host_busy_free_sp:
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
+qc24_free_sp_fail_command:
+ sp->free(sp);
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+
qc24_fail_command:
cmd->scsi_done(cmd);
@@ -1184,16 +1177,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-static int
-sp_get(struct srb *sp)
-{
- if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
- /* kref get fail */
- return ENXIO;
- else
- return 0;
-}
-
#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
* qla2x00_isp_reg_stat
@@ -1249,6 +1232,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
uint64_t lun;
int rval;
struct qla_hw_data *ha = vha->hw;
+ uint32_t ratov_j;
+ struct qla_qpair *qpair;
+ unsigned long flags;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
@@ -1261,13 +1247,26 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
sp = scsi_cmd_priv(cmd);
+ qpair = sp->qpair;
- if (sp->fcport && sp->fcport->deleted)
+ if ((sp->fcport && sp->fcport->deleted) || !qpair)
return SUCCESS;
- /* Return if the command has already finished. */
- if (sp_get(sp))
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (sp->completed) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return SUCCESS;
+ }
+
+ if (sp->abort || sp->aborted) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return FAILED;
+ }
+
+ sp->abort = 1;
+ sp->comp = &comp;
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -1276,47 +1275,37 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
+ /*
+ * Abort will release the original command/sp from the firmware. Let
+ * the original command call scsi_done; in return, it will wake up
+ * this sleeping thread.
+ */
rval = ha->isp_ops->abort_command(sp);
+
ql_dbg(ql_dbg_taskm, vha, 0x8003,
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
+ /* Wait for the command completion. */
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- /*
- * The command has been aborted. That means that the firmware
- * won't report a completion.
- */
- sp->done(sp, DID_ABORT << 16);
- ret = SUCCESS;
- break;
- case QLA_FUNCTION_PARAMETER_ERROR: {
- /* Wait for the command completion. */
- uint32_t ratov = ha->r_a_tov/10;
- uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
-
- WARN_ON_ONCE(sp->comp);
- sp->comp = &comp;
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_dbg(ql_dbg_taskm, vha, 0xffff,
"%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
- __func__, ha->r_a_tov);
+ __func__, ha->r_a_tov/10);
ret = FAILED;
} else {
ret = SUCCESS;
}
break;
- }
default:
- /*
- * Either abort failed or abort and completion raced. Let
- * the SCSI core retry the abort in the former case.
- */
ret = FAILED;
break;
}
sp->comp = NULL;
- atomic_dec(&sp->ref_count);
+
ql_log(ql_log_info, vha, 0x801c,
"Abort command issued nexus=%ld:%d:%llu -- %x.\n",
vha->host_no, id, lun, ret);
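
The wait budget is 4 * R_A_TOV, and the /10 scaling suggests ha->r_a_tov is kept in 100 ms units: a typical r_a_tov of 100 would give a 10 s resource-allocation timeout and a 40 s abort wait. Worked out standalone (the sample value is an assumption):

#include <stdio.h>

int main(void)
{
        unsigned int r_a_tov = 100; /* illustrative value, in 100 ms units */
        unsigned int ratov_ms = r_a_tov / 10 * 4 * 1000; /* same math as above */

        printf("abort wait budget: %u ms\n", ratov_ms); /* 40000 ms = 40 s */
        return 0;
}
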
@@ -1708,32 +1697,53 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
int rval;
+ bool ret_cmd;
+ uint32_t ratov_j;
- if (sp_get(sp))
+ if (qla2x00_chip_is_down(vha)) {
+ sp->done(sp, res);
return;
+ }
if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
(sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!qla2x00_isp_reg_stat(ha))) {
+ if (sp->comp) {
+ sp->done(sp, res);
+ return;
+ }
+
sp->comp = &comp;
+ sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
- rval = ha->isp_ops->abort_command(sp);
+ rval = ha->isp_ops->abort_command(sp);
+ /* Wait for command completion. */
+ ret_cmd = false;
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- sp->done(sp, res);
+ if (wait_for_completion_timeout(&comp, ratov_j)) {
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
+ __func__, ha->r_a_tov/10);
+ ret_cmd = true;
+ }
+ /* else FW return SP to driver */
break;
- case QLA_FUNCTION_PARAMETER_ERROR:
- wait_for_completion(&comp);
+ default:
+ ret_cmd = true;
break;
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- sp->comp = NULL;
+ if (ret_cmd && (!sp->completed || !sp->aborted))
+ sp->done(sp, res);
+ } else {
+ sp->done(sp, res);
}
-
- atomic_dec(&sp->ref_count);
}
static void
@@ -1755,7 +1765,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- req->outstanding_cmds[cnt] = NULL;
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1777,6 +1786,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
default:
break;
}
+ req->outstanding_cmds[cnt] = NULL;
}
}
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
@@ -3492,6 +3502,29 @@ disable_device:
return ret;
}
+static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
+{
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ struct qla_hw_data *ha;
+
+ if (!base_vha)
+ return;
+
+ ha = base_vha->hw;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list)
+ set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
+
+ /*
+ * Indicate device removal to prevent future board_disable
+ * and wait until any pending board_disable has completed.
+ */
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
@@ -3508,7 +3541,7 @@ qla2x00_shutdown(struct pci_dev *pdev)
* Prevent future board_disable and wait
* until any pending board_disable has completed.
*/
- set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
+ __qla_set_remove_flag(vha);
cancel_work_sync(&ha->board_disable);
if (!atomic_read(&pdev->enable_cnt))
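
__qla_set_remove_flag() marks every vport and then the base port inside a single vport_slock critical section, so no vport can kick off a board_disable between the two marks. A kernel-style sketch of the pattern; the types are abbreviations, not the driver's:

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct vport { unsigned long pci_flags; struct list_head list; };

static void set_removing(spinlock_t *lock, struct list_head *vp_list,
                         unsigned long *base_flags, int flag_bit)
{
        struct vport *vp;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_for_each_entry(vp, vp_list, list)
                set_bit(flag_bit, &vp->pci_flags);
        set_bit(flag_bit, base_flags); /* base port in the same critical section */
        spin_unlock_irqrestore(lock, flags);
}
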
@@ -3668,10 +3701,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha = base_vha->hw;
ql_log(ql_log_info, base_vha, 0xb079,
"Removing driver\n");
-
- /* Indicate device removal to prevent future board_disable and wait
- * until any pending board_disable has completed. */
- set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ __qla_set_remove_flag(base_vha);
cancel_work_sync(&ha->board_disable);
/*
@@ -4666,7 +4696,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->sfp_data = NULL;
if (ha->flt)
- dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ dma_free_coherent(&ha->pdev->dev,
+ sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
ha->flt, ha->flt_dma);
ha->flt = NULL;
ha->flt_dma = 0;
@@ -5042,19 +5073,17 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
- fcport->fc4_type = FC4_TYPE_FCP_SCSI;
-
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
- fcport->fc4_type = FC4_TYPE_OTHER;
- fcport->fc4f_nvme = FC4_TYPE_NVME;
- }
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
- if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+ fcport->fc4_type = e->u.new_sess.fc4_type;
+ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+ fcport->fc4_type = FS_FC4TYPE_FCP;
fcport->n2n_flag = 1;
+ if (vha->flags.nvme_enabled)
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ }
} else {
ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -5158,7 +5187,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->flags &= ~FCF_FABRIC_DEVICE;
fcport->keep_nport_handle = 1;
if (vha->flags.nvme_enabled) {
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type =
+ (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
fcport->n2n_flag = 1;
}
fcport->fw_login_state = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a06e56224a55..51b275a575a5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -463,7 +463,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
case IMMED_NOTIFY_TYPE:
{
- struct scsi_qla_host *host = vha;
+ struct scsi_qla_host *host;
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)pkt;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 294d77c02cdf..5b0c057def2b 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -10,6 +10,7 @@
#define ISPREG(vha) (&(vha)->hw->iobase->isp24)
#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha) IOBAR(ISPREG(vha))
+#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
@@ -261,6 +262,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ulong start = le32_to_cpu(ent->t262.start_addr);
ulong end = le32_to_cpu(ent->t262.end_addr);
ulong dwords;
+ int rc;
ql_dbg(ql_dbg_misc, vha, 0xd206,
"%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
@@ -308,7 +310,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
dwords = end - start + 1;
if (buf) {
buf += *len;
- qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
+ __func__, area, start, end);
+ return INVALID_ENTRY;
+ }
}
*len += dwords * sizeof(uint32_t);
done:
@@ -838,6 +846,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ent = qla27xx_find_entry(type)(vha, ent, buf, len);
if (!ent)
break;
+
+ if (ent == INVALID_ENTRY) {
+ *len = 0;
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Unable to capture FW dump");
+ goto bailout;
+ }
}
if (tmp->count)
@@ -847,6 +862,9 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
if (ent)
ql_dbg(ql_dbg_misc, vha, 0xd019,
"%s: missing end entry\n", __func__);
+
+bailout:
+ cpu_to_le32s(&tmp->count); /* endianize residual count */
}
static void
@@ -999,8 +1017,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
uint j;
ulong len;
void *buf = vha->hw->fw_dump;
+ uint count = vha->hw->fw_dump_mpi ? 2 : 1;
- for (j = 0; j < 2; j++, fwdt++, buf += len) {
+ for (j = 0; j < count; j++, fwdt++, buf += len) {
ql_log(ql_log_warn, vha, 0xd011,
"-> fwdt%u running...\n", j);
if (!fwdt->template) {
@@ -1010,7 +1029,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
}
len = qla27xx_execute_fwdt_template(vha,
fwdt->template, buf);
- if (len != fwdt->dump_size) {
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
ql_log(ql_log_warn, vha, 0xd013,
"-> fwdt%u fwdump residual=%+ld\n",
j, fwdt->dump_size - len);
@@ -1025,6 +1046,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
+bailout:
+ vha->hw->fw_dump_mpi = 0;
#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
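
The INVALID_ENTRY sentinel above lets the template walker tell a clean end of walk (NULL) apart from an unrecoverable dump failure, without threading an extra status argument through every entry handler. A minimal standalone sketch of the same pattern, with hypothetical names:

#include <stdio.h>

struct entry { int type; };

/*
 * Sentinel distinct from NULL: an all-ones pointer that can never be a
 * valid object. NULL means "end of list"; INVALID_ENTRY means
 * "processing failed, discard everything accumulated so far".
 */
#define INVALID_ENTRY ((struct entry *)-1)

static struct entry *process(struct entry *e)
{
	if (e->type < 0)
		return INVALID_ENTRY;	/* unrecoverable: caller must bail */
	if (e->type == 0)
		return NULL;		/* clean end of the walk */
	return e + 1;			/* advance to the next entry */
}

int main(void)
{
	struct entry list[] = { { 1 }, { 1 }, { 0 } };
	struct entry *e = list;

	while (e) {
		e = process(e);
		if (e == INVALID_ENTRY) {
			fprintf(stderr, "walk aborted\n");
			return 1;
		}
	}
	return 0;
}
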
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index a8f2a953ceff..03bd3b712b77 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.19-k"
+#define QLA2XXX_VERSION "10.01.00.21-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index dac9a7013208..02636b4785c5 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -640,9 +640,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
- dma_free_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
goto exit_init_fw_cb;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7a1b6c76f263..930e4803d888 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -186,7 +186,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
struct scsi_driver *drv;
unsigned int good_bytes;
- scsi_device_unbusy(sdev);
+ scsi_device_unbusy(sdev, cmd);
/*
* Clear the flags that say that the device/target/host is no longer
@@ -465,10 +465,14 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
+ if (vpd_buf->data[i] == 0x0)
+ scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
if (vpd_buf->data[i] == 0x80)
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
if (vpd_buf->data[i] == 0x83)
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
+ if (vpd_buf->data[i] == 0x89)
+ scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
}
kfree(vpd_buf);
}
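
For context on the pg0/pg89 caching added above: VPD page 0x00 (Supported VPD Pages) is itself a list, with the supported page codes starting at byte 4, which is why scsi_attach_vpd() scans vpd_buf->data from index 4. A minimal sketch of testing a page code against such a buffer (the helper name and the be16 length read are illustrative, not the kernel's implementation):

#include <stdbool.h>
#include <stddef.h>

/*
 * 'vpd' points at a VPD page 0x00 response: byte 1 is the page code,
 * bytes 2-3 the big-endian payload length, page codes from byte 4 on.
 */
static bool vpd_page_supported(const unsigned char *vpd, size_t len,
			       unsigned char page)
{
	size_t i, end = 4 + (size_t)((vpd[2] << 8) | vpd[3]);

	if (end > len)
		end = len;		/* never scan past the buffer */
	for (i = 4; i < end; i++)
		if (vpd[i] == page)
			return true;
	return false;
}
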
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d323523f5f9d..44cb054d5e66 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1025,7 +1025,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
int arr_len, unsigned int off_dst)
{
- int act_len, n;
+ unsigned int act_len, n;
struct scsi_data_buffer *sdb = &scp->sdb;
off_t skip = off_dst;
@@ -1039,7 +1039,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
- n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
+ n = scsi_bufflen(scp) - (off_dst + act_len);
scsi_set_resid(scp, min(scsi_get_resid(scp), n));
return 0;
}
@@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 91c007d26c1e..3e7a45d0daca 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -189,7 +189,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* active on the host/device.
*/
if (unbusy)
- scsi_device_unbusy(device);
+ scsi_device_unbusy(device, cmd);
/*
* Requeue this command. It will go before all other commands
@@ -321,20 +321,20 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
}
/*
- * Decrement the host_busy counter and wake up the error handler if necessary.
- * Avoid as follows that the error handler is not woken up if shost->host_busy
- * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
- * with an RCU read lock in this function to ensure that this function in its
- * entirety either finishes before scsi_eh_scmd_add() increases the
+ * Wake up the error handler if necessary. To avoid the race in which the
+ * error handler is not woken up when the number of host in-flight requests
+ * equals shost->host_failed, use call_rcu() in scsi_eh_scmd_add() in
+ * combination with an RCU read lock in this function to ensure that this
+ * function in its entirety either finishes before scsi_eh_scmd_add() increases the
* host_failed counter or that it notices the shost state change made by
* scsi_eh_scmd_add().
*/
-static void scsi_dec_host_busy(struct Scsi_Host *shost)
+static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
unsigned long flags;
rcu_read_lock();
- atomic_dec(&shost->host_busy);
+ __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -344,12 +344,12 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
rcu_read_unlock();
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = scsi_target(sdev);
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
@@ -430,9 +430,6 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if (shost->can_queue > 0 &&
- atomic_read(&shost->host_busy) >= shost->can_queue)
- return true;
if (atomic_read(&shost->host_blocked) > 0)
return true;
if (shost->host_self_blocked)
@@ -1139,6 +1136,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
int retries;
+ bool in_flight;
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
flags |= SCMD_INITIALIZED;
@@ -1147,6 +1145,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
jiffies_at_alloc = cmd->jiffies_at_alloc;
retries = cmd->retries;
+ in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
@@ -1158,6 +1157,8 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies_at_alloc;
cmd->retries = retries;
+ if (in_flight)
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
scsi_add_cmd_to_list(cmd);
}
@@ -1367,16 +1368,14 @@ out_dec:
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ struct scsi_cmnd *cmd)
{
- unsigned int busy;
-
if (scsi_host_in_recovery(shost))
return 0;
- busy = atomic_inc_return(&shost->host_busy) - 1;
if (atomic_read(&shost->host_blocked) > 0) {
- if (busy)
+ if (scsi_host_busy(shost) > 0)
goto starved;
/*
@@ -1390,8 +1389,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (shost->can_queue > 0 && busy >= shost->can_queue)
- goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1403,6 +1400,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
spin_unlock_irq(shost->host_lock);
}
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+
return 1;
starved:
@@ -1411,7 +1410,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
return 0;
}
@@ -1665,7 +1664,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_RESOURCE;
if (!scsi_target_queue_ready(shost, sdev))
goto out_put_budget;
- if (!scsi_host_queue_ready(q, shost, sdev))
+ if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1697,7 +1696,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
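
The scsi_lib.c changes above take the shared atomic host_busy counter off the I/O hot path: each command now carries an SCMD_STATE_INFLIGHT bit, and the busy total is derived only when actually needed (scsi_host_busy() walks the tag set via blk_mq_tagset_busy_iter() in the kernel). A minimal userspace sketch of that trade-off, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

struct cmd {
	atomic_bool in_flight;	/* stands in for SCMD_STATE_INFLIGHT */
};

/* Hot path: flip a per-command flag; no shared-counter cacheline ping-pong. */
static void cmd_start(struct cmd *c) { atomic_store(&c->in_flight, true); }
static void cmd_done(struct cmd *c)  { atomic_store(&c->in_flight, false); }

/* Slow path (error handling, sysfs): count by walking the commands. */
static unsigned int host_busy(struct cmd *cmds, unsigned int n)
{
	unsigned int i, busy = 0;

	for (i = 0; i < n; i++)
		if (atomic_load(&cmds[i].in_flight))
			busy++;
	return busy;
}
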
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index c6ed0b12e807..c91fa3feb930 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -390,6 +390,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
const char *mlret_string = scsi_mlreturn_string(disposition);
const char *hb_string = scsi_hostbyte_string(cmd->result);
const char *db_string = scsi_driverbyte_string(cmd->result);
+ unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
@@ -431,10 +432,15 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (db_string)
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=%s", db_string);
+ "driverbyte=%s ", db_string);
else
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=0x%02x", driver_byte(cmd->result));
+ "driverbyte=0x%02x ",
+ driver_byte(cmd->result));
+
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "cmd_age=%lus", cmd_age);
+
out_printk:
dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
scsi_log_release_buffer(logbuf);
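
The new cmd_age output leans on jiffies arithmetic being wrap-safe: with both operands unsigned long, jiffies - cmd->jiffies_at_alloc yields the elapsed ticks even if the counter wrapped in between. A small standalone illustration (the HZ value is just an assumption for the example):

#include <stdio.h>

#define HZ 250UL	/* assumed tick rate for this example */

int main(void)
{
	unsigned long jiffies_at_alloc = (unsigned long)-100;	/* just before wrap */
	unsigned long jiffies = 400;				/* just after wrap */

	/*
	 * Unsigned subtraction wraps modulo 2^BITS, so the difference is
	 * 500 ticks, i.e. cmd_age=2s at HZ=250, despite the rollover.
	 */
	printf("cmd_age=%lus\n", (jiffies - jiffies_at_alloc) / HZ);
	return 0;
}
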
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index cc2859d76d81..3bff9f7aa684 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -87,7 +87,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
-extern void scsi_device_unbusy(struct scsi_device *sdev);
+extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index cc51f4756077..677b5c5403d2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -437,6 +437,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
+ struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
unsigned long flags;
sdev = container_of(work, struct scsi_device, ew.work);
@@ -466,16 +467,24 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
sdev->request_queue = NULL;
mutex_lock(&sdev->inquiry_mutex);
+ vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
+ lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80,
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
+ if (vpd_pg0)
+ kfree_rcu(vpd_pg0, rcu);
if (vpd_pg83)
kfree_rcu(vpd_pg83, rcu);
if (vpd_pg80)
kfree_rcu(vpd_pg80, rcu);
+ if (vpd_pg89)
+ kfree_rcu(vpd_pg89, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -868,6 +877,8 @@ static struct bin_attribute dev_attr_vpd_##_page = { \
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
+sdev_vpd_pg_attr(pg89);
+sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -1200,12 +1211,18 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
struct scsi_device *sdev = to_scsi_device(dev);
+ if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0)
+ return 0;
+
if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
return 0;
if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
return 0;
+ if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
+ return 0;
+
return S_IRUGO;
}
@@ -1248,8 +1265,10 @@ static struct attribute *scsi_sdev_attrs[] = {
};
static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+ &dev_attr_vpd_pg0,
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
+ &dev_attr_vpd_pg89,
&dev_attr_inquiry,
NULL
};
@@ -1309,7 +1328,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
device_enable_async_suspend(&sdev->sdev_gendev);
scsi_autopm_get_target(starget);
pm_runtime_set_active(&sdev->sdev_gendev);
- pm_runtime_forbid(&sdev->sdev_gendev);
+ if (!sdev->rpm_autosuspend)
+ pm_runtime_forbid(&sdev->sdev_gendev);
pm_runtime_enable(&sdev->sdev_gendev);
scsi_autopm_put_target(starget);
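
The release path above applies the usual RCU publish/retire idiom to the new pg0/pg89 buffers: swap the published pointer under the inquiry mutex with rcu_replace_pointer(), then retire the old buffer with kfree_rcu() so readers under rcu_read_lock() never see freed memory. A kernel-style sketch of the idiom (not a standalone program; the slot/lock parameters are illustrative):

struct scsi_vpd {
	struct rcu_head rcu;
	int len;
	unsigned char data[];
};

static void publish_vpd(struct scsi_vpd __rcu **slot, struct scsi_vpd *new,
			struct mutex *lock)
{
	struct scsi_vpd *old;

	mutex_lock(lock);
	/* Writers serialize on the mutex; readers use rcu_dereference(). */
	old = rcu_replace_pointer(*slot, new, lockdep_is_held(lock));
	mutex_unlock(lock);

	if (old)
		kfree_rcu(old, rcu);	/* freed only after a grace period */
}
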
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0f17e7dac1b0..ac35c301c792 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -9,7 +9,7 @@
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
-#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8]))
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
@@ -18,15 +18,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
@@ -36,17 +39,12 @@ static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba, txlen;
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[7] << 8);
- txlen |= cdb[8];
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be16(&cdb[7]);
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
@@ -61,19 +59,12 @@ static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[6] << 24);
- txlen |= (cdb[7] << 16);
- txlen |= (cdb[8] << 8);
- txlen |= cdb[9];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u32 lba, txlen;
+
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
@@ -84,23 +75,13 @@ static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- txlen |= (cdb[10] << 24);
- txlen |= (cdb[11] << 16);
- txlen |= (cdb[12] << 8);
- txlen |= cdb[13];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u64 lba;
+ u32 txlen;
+
+ lba = get_unaligned_be64(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME_16)
@@ -115,8 +96,8 @@ static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0, txlen = 0;
- u32 ei_lbrt = 0;
+ u64 lba;
+ u32 ei_lbrt, txlen;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
@@ -136,26 +117,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[12] << 56);
- lba |= ((u64)cdb[13] << 48);
- lba |= ((u64)cdb[14] << 40);
- lba |= ((u64)cdb[15] << 32);
- lba |= (cdb[16] << 24);
- lba |= (cdb[17] << 16);
- lba |= (cdb[18] << 8);
- lba |= cdb[19];
- ei_lbrt |= (cdb[20] << 24);
- ei_lbrt |= (cdb[21] << 16);
- ei_lbrt |= (cdb[22] << 8);
- ei_lbrt |= cdb[23];
- txlen |= (cdb[28] << 24);
- txlen |= (cdb[29] << 16);
- txlen |= (cdb[30] << 8);
- txlen |= cdb[31];
-
- trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
- cmd, (unsigned long long)lba,
- (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+ lba = get_unaligned_be64(&cdb[12]);
+ ei_lbrt = get_unaligned_be32(&cdb[20]);
+ txlen = get_unaligned_be32(&cdb[28]);
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u",
+ cmd, lba, txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
@@ -170,7 +137,7 @@ static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- unsigned int regions = cdb[7] << 8 | cdb[8];
+ unsigned int regions = get_unaligned_be16(&cdb[7]);
trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
trace_seq_putc(p, 0);
@@ -182,8 +149,8 @@ static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0;
- u32 alloc_len = 0;
+ u64 lba;
+ u32 alloc_len;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
@@ -197,21 +164,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- alloc_len |= (cdb[10] << 24);
- alloc_len |= (cdb[11] << 16);
- alloc_len |= (cdb[12] << 8);
- alloc_len |= cdb[13];
-
- trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
- (unsigned long long)lba, alloc_len);
+ lba = get_unaligned_be64(&cdb[2]);
+ alloc_len = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len);
out:
trace_seq_putc(p, 0);
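
The scsi_trace.c rewrite replaces the open-coded shift-and-OR byte assembly with the get_unaligned_be{16,32,64}() helpers, and encodes the SBC rule that a READ(6)/WRITE(6) TRANSFER LENGTH of zero means 256 blocks. A standalone sketch of both points (get_be16/get_be32 stand in for the kernel helpers):

#include <stdint.h>
#include <stdio.h>

static uint16_t get_be16(const uint8_t *p) { return (p[0] << 8) | p[1]; }
static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
	/* READ(10): opcode 0x28, LBA in bytes 2-5, length in bytes 7-8 */
	uint8_t rw10[10] = { 0x28, 0, 0, 0, 0x10, 0, 0, 0, 0x08, 0 };
	/* READ(6): opcode 0x08, 21-bit LBA in bytes 1-3, length in byte 4 */
	uint8_t rw6[6] = { 0x08, 0, 0, 0x20, 0x00, 0 };
	uint32_t lba6 = ((rw6[1] & 0x1F) << 16) | (rw6[2] << 8) | rw6[3];
	uint32_t txlen6 = rw6[4] ? rw6[4] : 256;	/* SBC: 0 means 256 */

	printf("rw10: lba=%u txlen=%u\n", get_be32(&rw10[2]), get_be16(&rw10[7]));
	printf("rw6:  lba=%u txlen=%u\n", lba6, txlen6);
	return 0;
}
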
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 13925021473d..7dc17821f873 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3390,6 +3390,10 @@ static int sd_probe(struct device *dev)
}
blk_pm_runtime_init(sdp->request_queue, dev);
+ if (sdp->rpm_autosuspend) {
+ pm_runtime_set_autosuspend_delay(dev,
+ sdp->host->hostt->rpm_autosuspend_delay);
+ }
device_add_disk(dev, gd, NULL);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
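
The sd_probe() hunk above is the consumer side of the new runtime-PM opt-in: a low-level driver advertises a delay in its host template and flags each device, and sd then programs autosuspend instead of calling pm_runtime_forbid(). A kernel-style sketch of how an LLD might opt in (field names per this series; the template wiring is illustrative):

static int example_slave_configure(struct scsi_device *sdev)
{
	/*
	 * With this set, sd_probe() calls
	 * pm_runtime_set_autosuspend_delay() rather than forbidding
	 * runtime PM for the disk.
	 */
	sdev->rpm_autosuspend = 1;
	return 0;
}

static struct scsi_host_template example_sht = {
	.name			= "example",
	.slave_configure	= example_slave_configure,
	.rpm_autosuspend_delay	= 3000,	/* ms of idle before suspend */
	/* ... remaining callbacks ... */
};
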
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9e4ef22b3579..160748ad9c0f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -429,18 +429,26 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
- if (!access_ok(buf, count))
- return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
- old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
- if (!old_hdr)
- return -ENOMEM;
- if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
- retval = -EFAULT;
- goto free_old_hdr;
- }
+ old_hdr = memdup_user(buf, SZ_SG_HEADER);
+ if (IS_ERR(old_hdr))
+ return PTR_ERR(old_hdr);
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
+ /*
+ * This is stupid.
+ *
+ * We're copying the whole sg_io_hdr_t from user
+ * space just to get the 'pack_id' field. But the
+ * field is at different offsets for the compat
+ * case, so we'll use "get_sg_io_hdr()" to copy
+ * the whole thing and convert it.
+ *
+ * We could do something like just calculating the
+ * offset based on 'in_compat_syscall()', but the
+ * 'compat_sg_io_hdr' definition is in the wrong
+ * place for that.
+ */
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
@@ -537,7 +545,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
- if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
+ if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
@@ -623,11 +631,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
- if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
+ if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
@@ -636,13 +642,15 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
+ buf += SZ_SG_HEADER;
+ if (get_user(opcode, buf))
+ return -EFAULT;
+
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
- buf += SZ_SG_HEADER;
- __get_user(opcode, buf);
mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
@@ -685,7 +693,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (__copy_from_user(cmnd, buf, cmd_size))
+ if (copy_from_user(cmnd, buf, cmd_size))
return -EFAULT;
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
@@ -720,8 +728,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
if (count < SZ_SG_IO_HDR)
return -EINVAL;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
@@ -759,11 +765,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
- if (!access_ok(hp->cmdp, hp->cmd_len)) {
- sg_remove_request(sfp, srp);
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
- }
- if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
+ if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
@@ -940,8 +942,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
- if (!access_ok(p, SZ_SG_IO_HDR))
- return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
@@ -986,26 +986,21 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SG_GET_LOW_DMA:
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
- if (!access_ok(p, sizeof (sg_scsi_id_t)))
- return -EFAULT;
- else {
- sg_scsi_id_t __user *sg_idp = p;
+ {
+ sg_scsi_id_t v;
if (atomic_read(&sdp->detaching))
return -ENODEV;
- __put_user((int) sdp->device->host->host_no,
- &sg_idp->host_no);
- __put_user((int) sdp->device->channel,
- &sg_idp->channel);
- __put_user((int) sdp->device->id, &sg_idp->scsi_id);
- __put_user((int) sdp->device->lun, &sg_idp->lun);
- __put_user((int) sdp->device->type, &sg_idp->scsi_type);
- __put_user((short) sdp->device->host->cmd_per_lun,
- &sg_idp->h_cmd_per_lun);
- __put_user((short) sdp->device->queue_depth,
- &sg_idp->d_queue_depth);
- __put_user(0, &sg_idp->unused[0]);
- __put_user(0, &sg_idp->unused[1]);
+ memset(&v, 0, sizeof(v));
+ v.host_no = sdp->device->host->host_no;
+ v.channel = sdp->device->channel;
+ v.scsi_id = sdp->device->id;
+ v.lun = sdp->device->lun;
+ v.scsi_type = sdp->device->type;
+ v.h_cmd_per_lun = sdp->device->host->cmd_per_lun;
+ v.d_queue_depth = sdp->device->queue_depth;
+ if (copy_to_user(p, &v, sizeof(sg_scsi_id_t)))
+ return -EFAULT;
return 0;
}
case SG_SET_FORCE_PACK_ID:
@@ -1015,20 +1010,16 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
- if (!access_ok(ip, sizeof (int)))
- return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
- __put_user(srp->header.pack_id, ip);
- return 0;
+ return put_user(srp->header.pack_id, ip);
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- __put_user(-1, ip);
- return 0;
+ return put_user(-1, ip);
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
@@ -2017,12 +2008,12 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
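
The sg.c conversion drops the access_ok()/__copy_* pairs because copy_from_user()/copy_to_user() validate the user pointer themselves, and memdup_user() folds the kmalloc + copy + error-check sequence into one call that returns either a kernel copy or an ERR_PTR(). A kernel-style sketch of the resulting idiom (function name is illustrative):

static int read_user_header(const char __user *buf, size_t size)
{
	struct sg_header *hdr;

	hdr = memdup_user(buf, size);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);	/* -EFAULT or -ENOMEM */

	/* ... consume hdr ... */

	kfree(hdr);
	return 0;
}
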
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 79d2af36f655..1129fe7a27ed 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -276,7 +276,9 @@ struct pqi_raid_path_request {
u8 reserved4 : 2;
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
- u8 cdb[32];
+ u8 cdb[16];
+ u8 reserved6[12];
+ __le32 timeout;
struct pqi_sg_descriptor
sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -385,7 +387,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[4];
+ u8 reserved[2];
+ __le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
__le16 outbound_queue_id_to_manage;
@@ -445,7 +448,7 @@ struct pqi_vendor_general_response {
struct pqi_ofa_memory {
__le64 signature; /* "OFA_QRM" */
- __le16 version; /* version of this struct(1 = 1st version) */
+ __le16 version; /* version of this struct (1 = 1st version) */
u8 reserved[62];
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
@@ -761,6 +764,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_OFA 0
#define PQI_FIRMWARE_FEATURE_SMP 1
#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -826,10 +831,17 @@ union pqi_reset_register {
struct report_lun_header {
__be32 list_length;
- u8 extended_response;
+ u8 flags;
u8 reserved[3];
};
+/* for flags field of struct report_lun_header */
+#define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID (1 << 0)
+#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5)
+#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6)
+
+#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1)
+
struct report_log_lun_extended_entry {
u8 lunid[8];
u8 volume_id[16];
@@ -851,7 +863,7 @@ struct report_phys_lun_extended_entry {
};
/* for device_flags field of struct report_phys_lun_extended_entry */
-#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
+#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8
struct report_phys_lun_extended {
struct report_lun_header header;
@@ -864,7 +876,7 @@ struct raid_map_disk_data {
u8 reserved[2];
};
-/* constants for flags field of RAID map */
+/* for flags field of RAID map */
#define RAID_MAP_ENCRYPTION_ENABLED 0x1
struct raid_map {
@@ -907,7 +919,6 @@ struct pqi_scsi_dev {
u8 scsi3addr[8];
__be64 wwid;
u8 volume_id[16];
- u8 unique_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
u8 is_expander_smp_device : 1;
@@ -954,13 +965,9 @@ struct pqi_scsi_dev {
};
/* VPD inquiry pages */
-#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
-#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
-#define SCSI_VPD_HEADER_SZ 4
-#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */
#define VPD_PAGE (1 << 8)
@@ -1130,13 +1137,16 @@ struct pqi_ctrl_info {
struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
- bool in_shutdown;
+ bool block_device_reset;
bool in_ofa;
+ bool in_shutdown;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 pqi_reset_quiesce_supported : 1;
u8 soft_reset_handshake_supported : 1;
+ u8 raid_iu_timeout_supported: 1;
+ u8 tmf_iu_timeout_supported: 1;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1170,9 +1180,10 @@ struct pqi_ctrl_info {
spinlock_t raid_bypass_retry_list_lock;
struct work_struct raid_bypass_retry_work;
- struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
- dma_addr_t pqi_ofa_mem_dma_handle;
- void **pqi_ofa_chunk_virt_addr;
+ struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
+ dma_addr_t pqi_ofa_mem_dma_handle;
+ void **pqi_ofa_chunk_virt_addr;
+ atomic_t sync_cmds_outstanding;
};
enum pqi_ctrl_mode {
@@ -1191,10 +1202,6 @@ enum pqi_ctrl_mode {
#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
#define CISS_GET_RAID_MAP 0xc8
-/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
-#define CISS_REPORT_LOG_EXTENDED 0x1
-#define CISS_REPORT_PHYS_EXTENDED 0x2
-
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
@@ -1208,7 +1215,7 @@ enum pqi_ctrl_mode {
#define BMIC_SET_DIAG_OPTIONS 0xf4
#define BMIC_SENSE_DIAG_OPTIONS 0xf5
-#define CSMI_CC_SAS_SMP_PASSTHRU 0X17
+#define CSMI_CC_SAS_SMP_PASSTHRU 0x17
#define SA_FLUSH_CACHE 0x1
@@ -1244,10 +1251,12 @@ struct bmic_sense_subsystem_info {
u8 ctrl_serial_number[16];
};
-#define SA_EXPANDER_SMP_DEVICE 0x05
-#define SA_CONTROLLER_DEVICE 0x07
-/*SCSI Invalid Device Type for SAS devices*/
-#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
+/* constants for device_type field */
+#define SA_DEVICE_TYPE_SATA 0x1
+#define SA_DEVICE_TYPE_SAS 0x2
+#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5
+#define SA_DEVICE_TYPE_CONTROLLER 0x7
+#define SA_DEVICE_TYPE_NVME 0x9
struct bmic_identify_physical_device {
u8 scsi_bus; /* SCSI Bus number on controller */
@@ -1273,7 +1282,7 @@ struct bmic_identify_physical_device {
__le32 rpm; /* drive rotational speed in RPM */
u8 device_type; /* type of drive */
u8 sata_version; /* only valid when device_type = */
- /* BMIC_DEVICE_TYPE_SATA */
+ /* SA_DEVICE_TYPE_SATA */
__le64 big_total_block_count;
__le64 ris_starting_lba;
__le32 ris_size;
@@ -1396,18 +1405,6 @@ struct bmic_diag_options {
#pragma pack()
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
-{
- void *hostdata = shost_priv(shost);
-
- return *((struct pqi_ctrl_info **)hostdata);
-}
-
-static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
- return !ctrl_info->controller_online;
-}
-
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
atomic_inc(&ctrl_info->num_busy_threads);
@@ -1418,9 +1415,11 @@ static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
atomic_dec(&ctrl_info->num_busy_threads);
}
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
- return ctrl_info->block_requests;
+ void *hostdata = shost_priv(shost);
+
+ return *((struct pqi_ctrl_info **)hostdata);
}
void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ea5409bebf57..7b7ef3acb504 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.8-026"
+#define DRIVER_VERSION "1.2.10-025"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 8
-#define DRIVER_REVISION 26
+#define DRIVER_RELEASE 10
+#define DRIVER_REVISION 25
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -211,6 +211,11 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
return scsi3addr[2] != 0;
}
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ return !ctrl_info->controller_online;
+}
+
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
@@ -235,6 +240,21 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
sis_write_driver_scratch(ctrl_info, mode);
}
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_device_reset = true;
+}
+
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_device_reset;
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
+}
+
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = true;
@@ -331,6 +351,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
return device->in_remove && !ctrl_info->in_shutdown;
}
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->in_shutdown;
+}
+
static inline void pqi_schedule_rescan_worker_with_delay(
struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
@@ -360,6 +390,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_work_sync(&ctrl_info->event_work);
+}
+
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
if (!ctrl_info->heartbeat_counter)
@@ -377,7 +412,7 @@ static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
}
static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
- u8 clear)
+ u8 clear)
{
u8 status;
@@ -462,9 +497,9 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
request->data_direction = SOP_READ_FLAG;
cdb[0] = cmd;
if (cmd == CISS_REPORT_PHYS)
- cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+ cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
else
- cdb[1] = CISS_REPORT_LOG_EXTENDED;
+ cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -567,13 +602,12 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
}
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
- struct pqi_raid_error_info *error_info,
- unsigned long timeout_msecs)
+ u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+ struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
int rc;
- enum dma_data_direction dir;
struct pqi_raid_path_request request;
+ enum dma_data_direction dir;
rc = pqi_build_raid_path_request(ctrl_info, &request,
cmd, scsi3addr, buffer,
@@ -581,44 +615,44 @@ static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, error_info, timeout_msecs);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ error_info, timeout_msecs);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
-/* Helper functions for pqi_send_scsi_raid_request */
+/* helper functions for pqi_send_scsi_raid_request */
static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length)
+ u8 cmd, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+ buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}
static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length,
- struct pqi_raid_error_info *error_info)
+ u8 cmd, void *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+ buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}
-
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
- struct bmic_identify_controller *buffer)
+ struct bmic_identify_controller *buffer)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
- buffer, sizeof(*buffer));
+ buffer, sizeof(*buffer));
}
static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
- struct bmic_sense_subsystem_info *sense_info)
+ struct bmic_sense_subsystem_info *sense_info)
{
return pqi_send_ctrl_raid_request(ctrl_info,
- BMIC_SENSE_SUBSYSTEM_INFORMATION,
- sense_info, sizeof(*sense_info));
+ BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
+ sizeof(*sense_info));
}
static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
@@ -628,83 +662,9 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}
-static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u16 vpd_page)
-{
- int rc;
- int i;
- int pages;
- unsigned char *buf, bufsize;
-
- buf = kzalloc(256, GFP_KERNEL);
- if (!buf)
- return false;
-
- /* Get the size of the page list first */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, SCSI_VPD_HEADER_SZ);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
- bufsize = pages + SCSI_VPD_HEADER_SZ;
- else
- bufsize = 255;
-
- /* Get the whole VPD page list */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, bufsize);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- for (i = 1; i <= pages; i++)
- if (buf[3 + i] == vpd_page)
- goto exit_supported;
-
-exit_unsupported:
- kfree(buf);
- return false;
-
-exit_supported:
- kfree(buf);
- return true;
-}
-
-static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u8 *device_id, int buflen)
-{
- int rc;
- unsigned char *buf;
-
- if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
- return 1; /* function not supported */
-
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_DEVICE_ID,
- buf, 64);
- if (rc == 0) {
- if (buflen > 16)
- buflen = 16;
- memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
- }
-
- kfree(buf);
-
- return rc;
-}
-
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
- struct bmic_identify_physical_device *buffer,
- size_t buffer_length)
+ struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
int rc;
enum dma_data_direction dir;
@@ -725,6 +685,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
0, NULL, NO_TIMEOUT);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
@@ -763,7 +724,7 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, error_info);
}
-#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+#define PQI_FETCH_PTRAID_DATA (1 << 31)
static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
@@ -775,14 +736,15 @@ static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
return -ENOMEM;
rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ diag, sizeof(*diag));
if (rc)
goto out;
diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
- rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
+ sizeof(*diag));
+
out:
kfree(diag);
@@ -793,7 +755,7 @@ static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
- buffer, buffer_length);
+ buffer, buffer_length);
}
#pragma pack(1)
@@ -946,7 +908,7 @@ static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
- buffer_length);
+ buffer_length);
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1280,9 +1242,9 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
-#define RAID_BYPASS_STATUS 4
-#define RAID_BYPASS_CONFIGURED 0x1
-#define RAID_BYPASS_ENABLED 0x2
+#define RAID_BYPASS_STATUS 4
+#define RAID_BYPASS_CONFIGURED 0x1
+#define RAID_BYPASS_ENABLED 0x2
bypass_status = buffer[RAID_BYPASS_STATUS];
device->raid_bypass_configured =
@@ -1385,14 +1347,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
}
}
- if (pqi_get_device_id(ctrl_info, device->scsi3addr,
- device->unique_id, sizeof(device->unique_id)) < 0)
- dev_warn(&ctrl_info->pci_dev->dev,
- "Can't get device id for scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target,
- device->lun);
-
out:
kfree(buffer);
@@ -1413,6 +1367,7 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
return;
}
+
device->box_index = id_phys->box_index;
device->phys_box_on_bus = id_phys->phys_box_on_bus;
device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
@@ -1828,7 +1783,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device = new_device_list[i];
find_result = pqi_scsi_find_entry(ctrl_info, device,
- &matching_device);
+ &matching_device);
switch (find_result) {
case DEVICE_SAME:
@@ -2057,9 +2012,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = -ENOMEM;
goto out;
}
- if (pqi_hide_vsep) {
- int i;
+ if (pqi_hide_vsep) {
for (i = num_physicals - 1; i >= 0; i--) {
phys_lun_ext_entry =
&physdev_list->lun_entries[i];
@@ -2132,7 +2086,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->is_physical_device = is_physical_device;
if (is_physical_device) {
if (phys_lun_ext_entry->device_type ==
- SA_EXPANDER_SMP_DEVICE)
+ SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
@@ -2169,16 +2123,13 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (device->is_physical_device) {
device->wwid = phys_lun_ext_entry->wwid;
if ((phys_lun_ext_entry->device_flags &
- REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
+ CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
phys_lun_ext_entry->aio_handle) {
device->aio_enabled = true;
- device->aio_handle =
- phys_lun_ext_entry->aio_handle;
+ device->aio_handle =
+ phys_lun_ext_entry->aio_handle;
}
-
- pqi_get_physical_disk_info(ctrl_info,
- device, id_phys);
-
+ pqi_get_physical_disk_info(ctrl_info, device, id_phys);
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
@@ -3158,7 +3109,7 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
}
static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
- enum pqi_soft_reset_status reset_status)
+ enum pqi_soft_reset_status reset_status)
{
int rc;
@@ -3202,8 +3153,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
if (event_id == PQI_EVENT_OFA_QUIESCE) {
dev_info(&ctrl_info->pci_dev->dev,
- "Received Online Firmware Activation quiesce event for controller %u\n",
- ctrl_info->ctrl_id);
+ "Received Online Firmware Activation quiesce event for controller %u\n",
+ ctrl_info->ctrl_id);
pqi_ofa_ctrl_quiesce(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
if (ctrl_info->soft_reset_handshake_supported) {
@@ -3223,8 +3174,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
pqi_ofa_free_host_buffer(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation(%u) cancel reason : %u\n",
- ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ "Online Firmware Activation(%u) cancel reason : %u\n",
+ ctrl_info->ctrl_id, event->ofa_cancel_reason);
}
mutex_unlock(&ctrl_info->ofa_mutex);
@@ -3403,7 +3354,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
#define PQI_LEGACY_INTX_MASK 0x1
static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
- bool enable_intx)
+ bool enable_intx)
{
u32 intx_mask;
struct pqi_device_registers __iomem *pqi_registers;
@@ -3841,7 +3792,7 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
&pqi_registers->admin_oq_pi_addr);
reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
- (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+ (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
(admin_queues->int_msg_num << 16);
writel(reg, &pqi_registers->admin_iq_num_elements);
writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
@@ -4048,8 +3999,8 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
- *error_info)
+static int pqi_process_raid_io_error_synchronous(
+ struct pqi_raid_error_info *error_info)
{
int rc = -EIO;
@@ -4122,6 +4073,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
goto out;
}
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
+
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -4168,6 +4121,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_free_io_request(io_request);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
up(&ctrl_info->sync_request_sem);
@@ -4665,11 +4619,11 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle,
- GFP_KERNEL);
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
@@ -5402,7 +5356,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
pqi_ctrl_busy(ctrl_info);
if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info)) {
+ pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5419,7 +5373,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->raid_bypass_enabled &&
- !blk_rq_is_passthrough(scmd->request)) {
+ !blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
@@ -5650,6 +5604,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
+{
+ while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
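
sync_cmds_outstanding above is a plain quiesce counter: each synchronous request holds a reference for its lifetime, and teardown paths poll until the count drains, exactly as pqi_ctrl_wait_for_pending_sync_cmds() does. A minimal userspace sketch of the pattern (usleep() stands in for usleep_range()):

#include <stdatomic.h>
#include <unistd.h>

static atomic_int sync_cmds_outstanding;

static void issue_sync_cmd(void)
{
	atomic_fetch_add(&sync_cmds_outstanding, 1);
	/* ... submit the request and wait for its completion ... */
	atomic_fetch_sub(&sync_cmds_outstanding, 1);
}

/* Teardown: spin (politely) until every synchronous command is done. */
static void wait_for_pending_sync_cmds(void)
{
	while (atomic_load(&sync_cmds_outstanding))
		usleep(1000);
}
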
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5658,7 +5624,8 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-#define PQI_LUN_RESET_TIMEOUT_SECS 10
+#define PQI_LUN_RESET_TIMEOUT_SECS 30
+#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct completion *wait)
@@ -5667,7 +5634,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
rc = 0;
break;
}
@@ -5704,6 +5671,9 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+ if (ctrl_info->tmf_iu_timeout_supported)
+ put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
+ &request->timeout);
pqi_start_io(ctrl_info,
&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
@@ -5733,7 +5703,7 @@ static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
for (retries = 0;;) {
rc = pqi_lun_reset(ctrl_info, device);
- if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
+ if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -5787,17 +5757,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
shost->host_no, device->bus, device->target, device->lun);
pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "controller %u offlined - cannot send device reset\n",
- ctrl_info->ctrl_id);
+ if (pqi_ctrl_offline(ctrl_info) ||
+ pqi_device_reset_blocked(ctrl_info)) {
rc = FAILED;
goto out;
}
pqi_wait_until_ofa_finished(ctrl_info);
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
rc = pqi_device_reset(ctrl_info, device);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
dev_err(&ctrl_info->pci_dev->dev,
@@ -6066,6 +6036,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
put_unaligned_le16(iu_length, &request.header.iu_length);
+ if (ctrl_info->raid_iu_timeout_supported)
+ put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
+
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
@@ -6119,7 +6092,7 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info))
+ if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
return -EBUSY;
switch (cmd) {
@@ -6160,14 +6133,8 @@ static ssize_t pqi_firmware_version_show(struct device *dev,
static ssize_t pqi_driver_version_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- struct Scsi_Host *shost;
- struct pqi_ctrl_info *ctrl_info;
-
- shost = class_to_shost(dev);
- ctrl_info = shost_to_hba(shost);
-
- return snprintf(buffer, PAGE_SIZE,
- "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ DRIVER_VERSION BUILD_TIMESTAMP);
}
static ssize_t pqi_serial_number_show(struct device *dev,
@@ -6283,7 +6250,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned char uid[16];
+ u8 unique_id[16];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6296,16 +6263,22 @@ static ssize_t pqi_unique_id_show(struct device *dev,
flags);
return -ENODEV;
}
- memcpy(uid, device->unique_id, sizeof(uid));
+
+ if (device->is_physical_device) {
+ memset(unique_id, 0, 8);
+ memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
+ } else {
+ memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return snprintf(buffer, PAGE_SIZE,
"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
- uid[0], uid[1], uid[2], uid[3],
- uid[4], uid[5], uid[6], uid[7],
- uid[8], uid[9], uid[10], uid[11],
- uid[12], uid[13], uid[14], uid[15]);
+ unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+ unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+ unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+ unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
@@ -6328,6 +6301,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
flags);
return -ENODEV;
}
+
memcpy(lunid, device->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6335,7 +6309,8 @@ static ssize_t pqi_lunid_show(struct device *dev,
return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
-#define MAX_PATHS 8
+#define MAX_PATHS 8
+
static ssize_t pqi_path_info_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -6347,9 +6322,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
int output_len = 0;
u8 box;
u8 bay;
- u8 path_map_index = 0;
+ u8 path_map_index;
char *active;
- unsigned char phys_connector[2];
+ u8 phys_connector[2];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6365,7 +6340,7 @@ static ssize_t pqi_path_info_show(struct device *dev,
bay = device->bay;
for (i = 0; i < MAX_PATHS; i++) {
- path_map_index = 1<<i;
+ path_map_index = 1 << i;
if (i == device->active_path_index)
active = "Active";
else if (device->path_map & path_map_index)
@@ -6416,10 +6391,10 @@ end_buffer:
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
return output_len;
}
-
static ssize_t pqi_sas_address_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6440,6 +6415,7 @@ static ssize_t pqi_sas_address_show(struct device *dev,
flags);
return -ENODEV;
}
+
sas_address = device->sas_address;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6844,6 +6820,27 @@ static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
firmware_feature->feature_name);
}
+static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
+ ctrl_info->soft_reset_handshake_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
+ ctrl_info->raid_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
+ ctrl_info->tmf_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ }
+
+ pqi_firmware_feature_status(ctrl_info, firmware_feature);
+}
+
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
struct pqi_firmware_feature *firmware_feature)
{
@@ -6867,7 +6864,17 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
{
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
- .feature_status = pqi_firmware_feature_status,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "TMF IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
},
};
@@ -6921,7 +6928,6 @@ static void pqi_process_firmware_features(
return;
}
- ctrl_info->soft_reset_handshake_supported = false;
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
@@ -6929,10 +6935,6 @@ static void pqi_process_firmware_features(
firmware_features_iomem_addr,
pqi_firmware_features[i].feature_bit)) {
pqi_firmware_features[i].enabled = true;
- if (pqi_firmware_features[i].feature_bit ==
- PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
- ctrl_info->soft_reset_handshake_supported =
- true;
}
pqi_firmware_feature_update(ctrl_info,
&pqi_firmware_features[i]);
@@ -7074,13 +7076,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- rc = pqi_force_sis_mode(ctrl_info);
- if (rc)
- return rc;
+ if (reset_devices) {
+ sis_soft_reset(ctrl_info);
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ } else {
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+ }
/*
* Wait until the controller is ready to start accepting SIS
@@ -7386,7 +7395,7 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining product detail\n");
+ "error obtaining product details\n");
return rc;
}
@@ -7514,6 +7523,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
atomic_set(&ctrl_info->num_interrupts, 0);
+ atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7721,6 +7731,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
dev_err(dev, "Failed to allocate host buffer of size = %u",
bytes_requested);
}
+
+ return;
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
@@ -7787,8 +7799,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
0, NULL, NO_TIMEOUT);
}
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
-
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
@@ -7956,28 +7966,73 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
pqi_remove_ctrl(ctrl_info);
}
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+ scmd = io_request->scmd;
+ WARN_ON(scmd != NULL); /* IO command from SML */
+ WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
+ }
+}
+
static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
ctrl_info = pci_get_drvdata(pci_dev);
- if (!ctrl_info)
- goto error;
+ if (!ctrl_info) {
+ dev_err(&pci_dev->dev,
+ "cache could not be flushed\n");
+ return;
+ }
+
+ pqi_disable_events(ctrl_info);
+ pqi_wait_until_ofa_finished(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_event_worker(ctrl_info);
+
+ pqi_ctrl_shutdown_start(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending I/O failed\n");
+ return;
+ }
+
+ pqi_ctrl_block_device_reset(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
- pqi_free_interrupts(ctrl_info);
- pqi_reset(ctrl_info);
- if (rc == 0)
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache\n");
+
+ pqi_ctrl_block_requests(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending sync cmds failed\n");
return;
+ }
-error:
- dev_warn(&pci_dev->dev,
- "unable to flush controller cache\n");
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
@@ -8686,6 +8741,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
cdb) != 32);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ timeout) != 60);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
sg_descriptors) != 64);
BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
@@ -8840,6 +8897,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
nexus_id) != 10);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ timeout) != 14);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
lun_number) != 16);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
protocol_specific) != 24);
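The added BUILD_BUG_ON() lines pin the new timeout fields to the byte offsets
the firmware expects, failing the build rather than the boot if the layout ever
drifts. The pattern generalizes; a minimal standalone illustration with an
invented struct:

    #include <linux/build_bug.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    struct demo_request {
        u8     header[60];
        __le32 timeout;    /* firmware expects this at byte offset 60 */
    };

    static void __attribute__((unused)) demo_verify_layout(void)
    {
        BUILD_BUG_ON(offsetof(struct demo_request, timeout) != 60);
    }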
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 6776dfc1d317..b7289112455c 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
struct sas_phy *phy = pqi_sas_phy->phy;
sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (pqi_sas_phy->added_to_port)
list_del(&pqi_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(pqi_sas_phy);
}
@@ -312,7 +312,6 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy)
static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
u64 *identifier)
{
-
int rc;
unsigned long flags;
struct Scsi_Host *shost;
@@ -361,7 +360,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
}
}
- if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) {
+ if (found_device->phy_connected_dev_type != SA_DEVICE_TYPE_CONTROLLER) {
rc = -EINVAL;
goto out;
}
@@ -382,12 +381,10 @@ out:
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return rc;
-
}
static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
{
-
int rc;
unsigned long flags;
struct pqi_ctrl_info *ctrl_info;
@@ -482,7 +479,6 @@ pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy,
req_size -= SMP_CRC_FIELD_LENGTH;
put_unaligned_le32(req_size, &parameters->request_length);
-
put_unaligned_le32(resp_size, &parameters->response_length);
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -512,12 +508,12 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
int rc;
- struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ struct pqi_ctrl_info *ctrl_info;
struct bmic_csmi_smp_passthru_buffer *smp_buf;
struct pqi_raid_error_info error_info;
unsigned int reslen = 0;
- pqi_ctrl_busy(ctrl_info);
+ ctrl_info = shost_to_hba(shost);
if (job->reply_payload.payload_len == 0) {
rc = -ENOMEM;
@@ -539,16 +535,6 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto out;
}
- if (pqi_ctrl_offline(ctrl_info)) {
- rc = -ENXIO;
- goto out;
- }
-
- if (pqi_ctrl_blocked(ctrl_info)) {
- rc = -EBUSY;
- goto out;
- }
-
smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job);
if (!smp_buf) {
rc = -ENOMEM;
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 955e4c938d49..701b842296f0 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -501,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
@@ -523,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 0b845ab7c3bf..d14c2243e02a 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -132,6 +132,16 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+config SCSI_UFS_TI_J721E
+ tristate "TI glue layer for Cadence UFS Controller"
+ depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
+ help
+ This selects the TI glue layer driver for the Cadence UFS Host
+ Controller IP.
+
+ Select this if you have a TI platform with a UFS controller.
+ If unsure, say N.
+
config SCSI_UFS_BSG
bool "Universal Flash Storage BSG device node"
depends on SCSI_UFSHCD
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2a9097939bcb..94c6c5d7334b 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
new file mode 100644
index 000000000000..5216d228cdd9
--- /dev/null
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define TI_UFS_SS_CTRL 0x4
+#define TI_UFS_SS_RST_N_PCS BIT(0)
+#define TI_UFS_SS_CLK_26MHZ BIT(4)
+
+static int ti_j721e_ufs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long clk_rate;
+ void __iomem *regbase;
+ struct clk *clk;
+ u32 reg = 0;
+ int ret;
+
+ regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regbase))
+ return PTR_ERR(regbase);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ /* Select MPHY refclk frequency */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Cannot claim MPHY clock.\n");
+ return PTR_ERR(clk);
+ }
+ clk_rate = clk_get_rate(clk);
+ if (clk_rate == 26000000)
+ reg |= TI_UFS_SS_CLK_26MHZ;
+ devm_clk_put(dev, clk);
+
+ /* Take UFS slave device out of reset */
+ reg |= TI_UFS_SS_RST_N_PCS;
+ writel(reg, regbase + TI_UFS_SS_CTRL);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+ dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child nodes: %d\n", ret);
+ pm_runtime_put_sync(dev);
+ }
+
+ return ret;
+}
+
+static int ti_j721e_ufs_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_j721e_ufs_of_match[] = {
+ {
+ .compatible = "ti,j721e-ufs",
+ },
+ { },
+};
+
+static struct platform_driver ti_j721e_ufs_driver = {
+ .probe = ti_j721e_ufs_probe,
+ .remove = ti_j721e_ufs_remove,
+ .driver = {
+ .name = "ti-j721e-ufs",
+ .of_match_table = ti_j721e_ufs_of_match,
+ },
+};
+module_platform_driver(ti_j721e_ufs_driver);
+
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_DESCRIPTION("TI UFS host controller glue driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 6bbb1679bb91..5d6487350a6c 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -452,10 +452,7 @@ static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
/* get resource of ufs sys ctrl */
host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(host->ufs_sys_ctrl))
- return PTR_ERR(host->ufs_sys_ctrl);
-
- return 0;
+ return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}
static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
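PTR_ERR_OR_ZERO() folds the removed IS_ERR()/PTR_ERR() tail into a single
expression; the two forms are equivalent, as this sketch spells out:

    #include <linux/err.h>

    static int demo_ptr_err_or_zero(void *p)
    {
        /* PTR_ERR_OR_ZERO(p) == (IS_ERR(p) ? PTR_ERR(p) : 0) */
        return PTR_ERR_OR_ZERO(p);
    }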
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0f6ff33ce52e..83e28edc3ac5 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -147,6 +147,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
if (err)
goto out_variant_clear;
+ /* Enable runtime autosuspend */
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index a5b71487a206..c69c29a1ceb9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -246,6 +246,44 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
mb();
}
+/**
+ * ufs_qcom_host_reset - reset host controller and PHY
+ * @hba: per adapter instance
+ */
+static int ufs_qcom_host_reset(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (!host->core_reset) {
+ dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ goto out;
+ }
+
+ ret = reset_control_assert(host->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side add 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(host->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
+ usleep_range(1000, 1100);
+
+out:
+ return ret;
+}
+
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
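For the bound quoted in the assert/deassert comment above: four sleep-clock
cycles at 32.768 kHz are 4 / 32768 s, roughly 122 us, so usleep_range(200, 210)
leaves comfortable margin. A helper making the arithmetic explicit (name
invented, not part of the patch):

    #include <linux/kernel.h>

    /* Convert 32.768 kHz sleep-clock cycles to microseconds, rounding up. */
    static inline unsigned int demo_sleep_clk_cycles_to_us(unsigned int cycles)
    {
        return DIV_ROUND_UP(cycles * 1000000U, 32768U);  /* 4 -> 123 us */
    }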
@@ -254,6 +292,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
+ /* Reset UFS Host Controller and PHY */
+ ret = ufs_qcom_host_reset(hba);
+ if (ret)
+ dev_warn(hba->dev, "%s: host reset returned %d\n",
+ __func__, ret);
+
if (is_rate_B)
phy_set_mode(phy, PHY_MODE_UFS_HS_B);
@@ -1101,6 +1145,15 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
+ /* Setup the reset control of HCI */
+ host->core_reset = devm_reset_control_get(hba->dev, "rst");
+ if (IS_ERR(host->core_reset)) {
+ err = PTR_ERR(host->core_reset);
+ dev_warn(dev, "Failed to get reset control %d\n", err);
+ host->core_reset = NULL;
+ err = 0;
+ }
+
/* Fire up the reset controller. Failure here is non-fatal. */
host->rcdev.of_node = dev->of_node;
host->rcdev.ops = &ufs_qcom_reset_ops;
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index d401f174bb70..2d95e7cc7187 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -6,6 +6,7 @@
#define UFS_QCOM_H_
#include <linux/reset-controller.h>
+#include <linux/reset.h>
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
@@ -233,6 +234,8 @@ struct ufs_qcom_host {
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+ /* Reset control of HCI */
+ struct reset_control *core_reset;
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 969a36b15897..ad2abc96c0f1 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit == ahit)
- goto out_unlock;
- hba->ahit = ahit;
- if (!pm_runtime_suspended(hba->dev))
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+ if (hba->ahit != ahit)
+ hba->ahit = ahit;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!pm_runtime_suspended(hba->dev)) {
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ufshcd_auto_hibern8_enable(hba);
+ ufshcd_release(hba);
+ pm_runtime_put(hba->dev);
+ }
}
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index dc2f6d2b46ed..baeecee35d1e 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -162,6 +162,7 @@ out:
/**
* ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
*
* Should be called when unloading the driver.
*/
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
index fb9e2ff4f8d2..6a901da2d15a 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -80,7 +80,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8d40dc918f4e..76f9be71c31b 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -402,7 +402,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "IRQ resource not available\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 11a87f51c442..b5966faf3e98 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -88,6 +88,9 @@
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
+/* default delay of autosuspend: 2000 ms */
+#define RPM_AUTOSUSPEND_DELAY_MS 2000
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -114,7 +117,7 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
return -EINVAL;
- regs = kzalloc(len, GFP_KERNEL);
+ regs = kzalloc(len, GFP_ATOMIC);
if (!regs)
return -ENOMEM;
@@ -237,7 +240,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
END_FIX
};
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -1607,7 +1610,7 @@ static void ufshcd_gate_work(struct work_struct *work)
* state to CLKS_ON.
*/
if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state == REQ_CLKS_ON)) {
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
@@ -1935,8 +1938,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
} else {
dev_warn(hba->dev,
- "%s: Response size is bigger than buffer",
- __func__);
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, buf_len);
return -EINVAL;
}
}
@@ -2986,10 +2989,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@@ -3856,6 +3859,9 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
ret = ufshcd_host_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3885,15 +3891,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
+ int err;
+
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
/*
- * If link recovery fails then return error so that caller
- * don't retry the hibern8 enter again.
+ * If link recovery fails then return error code returned from
+ * ufshcd_link_recovery().
+ * If link recovery succeeds then return -EAGAIN to attempt
+ * hibern8 enter retry again.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
} else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
@@ -3907,7 +3922,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret || ret == -ENOLINK)
+ if (!ret)
goto out;
}
out:
@@ -3941,7 +3956,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
@@ -4631,9 +4646,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
+ struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+
+ if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ sdev->rpm_autosuspend = 1;
+
return 0;
}
@@ -4788,19 +4808,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ }
+ return retval;
}
/**
@@ -4856,8 +4886,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
u32 tr_doorbell;
@@ -4876,7 +4910,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
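The tr_doorbell ^ hba->outstanding_reqs idiom used by
ufshcd_transfer_req_compl() isolates exactly the slots the hardware has
finished: a bit still set in outstanding_reqs but already cleared from the
doorbell survives the XOR. A worked example with made-up masks:

    static void demo_doorbell_xor(void)
    {
        unsigned long outstanding = 0xb; /* slots 0, 1 and 3 in flight */
        unsigned long doorbell = 0x1;    /* only slot 0 still pending in HW */
        unsigned long completed = doorbell ^ outstanding;

        WARN_ON(completed != 0xa);       /* slots 1 and 3 have completed */
    }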
@@ -5395,61 +5434,77 @@ out:
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
/* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg)
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg) {
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg) {
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg) {
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
@@ -5472,10 +5527,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
@@ -5484,7 +5544,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
@@ -5532,6 +5592,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
}
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -5539,44 +5600,62 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
if (hba->errors)
- ufshcd_check_errors(hba);
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
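OR-ing irqreturn_t values works because IRQ_NONE is 0 and IRQ_HANDLED is bit 0
in <linux/irqreturn.h>, so the accumulated retval becomes IRQ_HANDLED as soon
as any sub-handler claims its source and stays IRQ_NONE otherwise, which lets
ufshcd_intr() log and dump registers for spurious interrupts. A minimal sketch
of the same aggregation (sources invented):

    #include <linux/irqreturn.h>
    #include <linux/types.h>

    static irqreturn_t demo_aggregate(u32 status)
    {
        irqreturn_t retval = IRQ_NONE;

        if (status & 0x1)         /* hypothetical source A */
            retval |= IRQ_HANDLED;
        if (status & 0x2)         /* hypothetical source B */
            retval |= IRQ_HANDLED;

        return retval;            /* IRQ_NONE only if nothing matched */
    }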
@@ -5584,8 +5663,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
@@ -5608,14 +5688,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (intr_status)
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
} while (intr_status && --retries);
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+ }
+
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5760,9 +5844,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* @hba: per-adapter instance
* @req_upiu: upiu request
* @rsp_upiu: upiu reply
- * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
* @desc_buff: pointer to descriptor buffer, NULL if NA
* @buff_len: descriptor size, 0 if NA
+ * @cmd_type: specifies the type (NOP, Query...)
* @desc_op: descriptor operation
*
* Those type of requests uses UTP Transfer Request Descriptor - utrd.
@@ -5776,7 +5860,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
u8 *desc_buff, int *buff_len,
- int cmd_type,
+ enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
struct ufshcd_lrb *lrbp;
@@ -5856,7 +5940,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memcpy(desc_buff, descp, resp_len);
*buff_len = resp_len;
} else {
- dev_warn(hba->dev, "rsp size is bigger than buffer");
+ dev_warn(hba->dev,
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, *buff_len);
*buff_len = 0;
err = -EINVAL;
}
@@ -5891,7 +5977,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op)
{
int err;
- int cmd_type = DEV_CMD_TYPE_QUERY;
+ enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
struct utp_task_req_desc treq = { { 0 }, };
int ocs_value;
u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
@@ -6770,23 +6856,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
&hba->desc_size.geom_desc);
if (err)
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
&hba->desc_size.hlth_desc);
if (err)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
-static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
-{
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
-}
-
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6881,9 +6957,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
@@ -6934,6 +7007,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -7069,6 +7145,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
.dma_boundary = PAGE_SIZE - 1,
+ .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -7950,12 +8027,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
goto out;
set_old_link_state:
@@ -8274,9 +8351,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- /* Set descriptor lengths to specification defaults */
- ufshcd_def_desc_sizes(hba);
-
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c94cfda52829..2740f6941ec6 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -716,6 +716,12 @@ struct ufs_hba {
* the performance of ongoing read/write operations.
*/
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
+ /*
+ * This capability allows host controller driver to automatically
+ * enable runtime power management by itself instead of waiting
+ * for userspace to control the power management.
+ */
+#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -749,6 +755,10 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
+static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
+}
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
@@ -916,6 +926,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index dbb75cd28dc8..c2961d37cc1c 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -195,7 +195,7 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index ca8e3abeb2c7..a23a8e5794f5 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+ return dma_len > (1U << 16) ? (1U << 16) : dma_len;
+}
+
+static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+ u32 dma_len)
+{
+ /* The old driver used 0xfffc as limit, so do that here too */
+ return dma_len > 0xfffc ? 0xfffc : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
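Note that (1U << 16) is 65536 (0x10000), one byte more than the old 0xFFFF cap,
while the Fastlane variant keeps the old driver's stricter 0xfffc limit. The
generic clamp could equally be written with min_t (equivalent sketch):

    #include <linux/minmax.h>
    #include <linux/types.h>

    static u32 demo_clamp_dma_len(u32 dma_len)
    {
        /* same behaviour as zorro_esp_dma_length_limit() above */
        return min_t(u32, dma_len, 1U << 16);
    }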
@@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
.irq_pending = fastlane_esp_irq_pending,
- .dma_length_limit = zorro_esp_dma_length_limit,
+ .dma_length_limit = fastlane_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
.dma_drain = zorro_esp_dma_drain,
.dma_invalidate = fastlane_esp_dma_invalidate,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 54bb1ebd8eb5..af35251232eb 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -297,7 +297,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct net_device *ndev = cdev->lldi.ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int ret, i;
if (!lldi->vr->iscsi.size) {
@@ -305,8 +304,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
return -EACCES;
}
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
-
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d19e051f2bc2..7251a87bb576 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1165,7 +1165,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -2002,7 +2004,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -2189,24 +2193,22 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
goto empty_sendtargets;
}
- if (strncmp("SendTargets", text_in, 11) != 0) {
+ if (strncmp("SendTargets=", text_in, 12) != 0) {
pr_err("Received Text Data that is not"
" SendTargets, cannot continue.\n");
goto reject;
}
+ /* '=' confirmed in strncmp */
text_ptr = strchr(text_in, '=');
- if (!text_ptr) {
- pr_err("No \"=\" separator found in Text Data,"
- " cannot continue.\n");
- goto reject;
- }
- if (!strncmp("=All", text_ptr, 4)) {
+ BUG_ON(!text_ptr);
+ if (!strncmp("=All", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
- pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
+ pr_err("Unable to locate valid SendTargets%s value\n",
+ text_ptr);
goto reject;
}
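Comparing five bytes of "=All" deliberately includes the string's NUL
terminator, so only the exact value matches and something like "=Allxyz" is
rejected. A tiny standalone restatement (sketch):

    #include <linux/string.h>
    #include <linux/types.h>

    static bool demo_is_exactly_all(const char *text_ptr)
    {
        /* length 5 covers '=', 'A', 'l', 'l' and the trailing '\0' */
        return strncmp("=All", text_ptr, 5) == 0;
    }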
@@ -4232,6 +4234,8 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_wait_for_sess_cmds(sess->se_sess);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 51ddca2033e0..0e54627d9aa8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -18,6 +18,22 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
+static char *chap_get_digest_name(const int digest_type)
+{
+ switch (digest_type) {
+ case CHAP_DIGEST_MD5:
+ return "md5";
+ case CHAP_DIGEST_SHA1:
+ return "sha1";
+ case CHAP_DIGEST_SHA256:
+ return "sha256";
+ case CHAP_DIGEST_SHA3_256:
+ return "sha3-256";
+ default:
+ return NULL;
+ }
+}
+
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -25,16 +41,21 @@ static int chap_gen_challenge(
unsigned int *c_len)
{
int ret;
- unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ unsigned char *challenge_asciihex;
struct iscsi_chap *chap = conn->auth_protocol;
- memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+ challenge_asciihex = kzalloc(chap->challenge_len * 2 + 1, GFP_KERNEL);
+ if (!challenge_asciihex)
+ return -ENOMEM;
- ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ memset(chap->challenge, 0, MAX_CHAP_CHALLENGE_LEN);
+
+ ret = get_random_bytes_wait(chap->challenge, chap->challenge_len);
if (unlikely(ret))
- return ret;
+ goto out;
+
bin2hex(challenge_asciihex, chap->challenge,
- CHAP_CHALLENGE_LENGTH);
+ chap->challenge_len);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
@@ -43,12 +64,29 @@ static int chap_gen_challenge(
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
+
+out:
+ kfree(challenge_asciihex);
+ return ret;
+}
+
+static int chap_test_algorithm(const char *name)
+{
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(tfm))
+ return -1;
+
+ crypto_free_shash(tfm);
return 0;
}
static int chap_check_algorithm(const char *a_str)
{
- char *tmp, *orig, *token;
+ char *tmp, *orig, *token, *digest_name;
+ long digest_type;
+ int r = CHAP_DIGEST_UNKNOWN;
tmp = kstrdup(a_str, GFP_KERNEL);
if (!tmp) {
@@ -70,15 +108,24 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;
- if (!strncmp(token, "5", 1)) {
- pr_debug("Selected MD5 Algorithm\n");
- kfree(orig);
- return CHAP_DIGEST_MD5;
+ if (kstrtol(token, 10, &digest_type))
+ continue;
+
+ digest_name = chap_get_digest_name(digest_type);
+ if (!digest_name)
+ continue;
+
+ pr_debug("Selected %s Algorithm\n", digest_name);
+ if (chap_test_algorithm(digest_name) < 0) {
+ pr_err("failed to allocate %s algo\n", digest_name);
+ } else {
+ r = digest_type;
+ goto out;
}
}
out:
kfree(orig);
- return CHAP_DIGEST_UNKNOWN;
+ return r;
}
static void chap_close(struct iscsi_conn *conn)
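With the rewritten walk above, CHAP_A arrives as a comma-separated preference
list and the first digest the crypto API can actually instantiate wins. A usage
sketch (the offered list is invented):

    static void demo_chap_negotiation(void)
    {
        /*
         * Offers SHA-256 (7), then SHA-1 (6), then MD5 (5). Returns
         * CHAP_DIGEST_SHA256 when crypto_alloc_shash("sha256") succeeds,
         * otherwise falls through to the next offered digest.
         */
        int digest = chap_check_algorithm("CHAP_A=7,6,5");

        pr_debug("negotiated CHAP digest type %d\n", digest);
    }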
@@ -94,7 +141,7 @@ static struct iscsi_chap *chap_server_open(
char *aic_str,
unsigned int *aic_len)
{
- int ret;
+ int digest_type;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -109,17 +156,19 @@ static struct iscsi_chap *chap_server_open(
return NULL;
chap = conn->auth_protocol;
- ret = chap_check_algorithm(a_str);
- switch (ret) {
+ digest_type = chap_check_algorithm(a_str);
+ switch (digest_type) {
case CHAP_DIGEST_MD5:
- pr_debug("[server] Got CHAP_A=5\n");
- /*
- * Send back CHAP_A set to MD5.
- */
- *aic_len = sprintf(aic_str, "CHAP_A=5");
- *aic_len += 1;
- chap->digest_type = CHAP_DIGEST_MD5;
- pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ chap->digest_size = MD5_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA1:
+ chap->digest_size = SHA1_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA256:
+ chap->digest_size = SHA256_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA3_256:
+ chap->digest_size = SHA3_256_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_UNKNOWN:
default:
@@ -128,6 +177,16 @@ static struct iscsi_chap *chap_server_open(
return NULL;
}
+ chap->digest_name = chap_get_digest_name(digest_type);
+
+ /* Tie the challenge length to the digest size */
+ chap->challenge_len = chap->digest_size;
+
+ pr_debug("[server] Got CHAP_A=%d\n", digest_type);
+ *aic_len = sprintf(aic_str, "CHAP_A=%d", digest_type);
+ *aic_len += 1;
+ pr_debug("[server] Sending CHAP_A=%d\n", digest_type);
+
/*
* Set Identifier.
*/
@@ -146,7 +205,7 @@ static struct iscsi_chap *chap_server_open(
return chap;
}
-static int chap_server_compute_md5(
+static int chap_server_compute_hash(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
@@ -155,36 +214,57 @@ static int chap_server_compute_md5(
{
unsigned long id;
unsigned char id_as_uchar;
- unsigned char digest[MD5_SIGNATURE_SIZE];
- unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
- unsigned char identifier[10], *challenge = NULL;
- unsigned char *challenge_binhex = NULL;
- unsigned char client_digest[MD5_SIGNATURE_SIZE];
- unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char type;
+ unsigned char identifier[10], *initiatorchg = NULL;
+ unsigned char *initiatorchg_binhex = NULL;
+ unsigned char *digest = NULL;
+ unsigned char *response = NULL;
+ unsigned char *client_digest = NULL;
+ unsigned char *server_digest = NULL;
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_shash *tfm = NULL;
struct shash_desc *desc = NULL;
- int auth_ret = -1, ret, challenge_len;
+ int auth_ret = -1, ret, initiatorchg_len;
+
+ digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!digest) {
+ pr_err("Unable to allocate the digest buffer\n");
+ goto out;
+ }
+
+ response = kzalloc(chap->digest_size * 2 + 2, GFP_KERNEL);
+ if (!response) {
+ pr_err("Unable to allocate the response buffer\n");
+ goto out;
+ }
+
+ client_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!client_digest) {
+ pr_err("Unable to allocate the client_digest buffer\n");
+ goto out;
+ }
+
+ server_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!server_digest) {
+ pr_err("Unable to allocate the server_digest buffer\n");
+ goto out;
+ }
memset(identifier, 0, 10);
memset(chap_n, 0, MAX_CHAP_N_SIZE);
memset(chap_r, 0, MAX_RESPONSE_LENGTH);
- memset(digest, 0, MD5_SIGNATURE_SIZE);
- memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
- memset(client_digest, 0, MD5_SIGNATURE_SIZE);
- memset(server_digest, 0, MD5_SIGNATURE_SIZE);
- challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge) {
+ initiatorchg = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg) {
pr_err("Unable to allocate challenge buffer\n");
goto out;
}
- challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge_binhex) {
- pr_err("Unable to allocate challenge_binhex buffer\n");
+ initiatorchg_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg_binhex) {
+ pr_err("Unable to allocate initiatorchg_binhex buffer\n");
goto out;
}
/*
@@ -219,18 +299,18 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ if (strlen(chap_r) != chap->digest_size * 2) {
pr_err("Malformed CHAP_R\n");
goto out;
}
- if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
pr_err("Malformed CHAP_R\n");
goto out;
}
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- tfm = crypto_alloc_shash("md5", 0, 0);
+ tfm = crypto_alloc_shash(chap->digest_name, 0, 0);
if (IS_ERR(tfm)) {
tfm = NULL;
pr_err("Unable to allocate struct crypto_shash\n");
@@ -265,21 +345,23 @@ static int chap_server_compute_md5(
}
ret = crypto_shash_finup(desc, chap->challenge,
- CHAP_CHALLENGE_LENGTH, server_digest);
+ chap->challenge_len, server_digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for challenge\n");
goto out;
}
- bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
- pr_debug("[server] MD5 Server Digest: %s\n", response);
+ bin2hex(response, server_digest, chap->digest_size);
+ pr_debug("[server] %s Server Digest: %s\n",
+ chap->digest_name, response);
- if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
- pr_debug("[server] MD5 Digests do not match!\n\n");
+ if (memcmp(server_digest, client_digest, chap->digest_size) != 0) {
+ pr_debug("[server] %s Digests do not match!\n\n",
+ chap->digest_name);
goto out;
} else
- pr_debug("[server] MD5 Digests match, CHAP connection"
- " successful.\n\n");
+ pr_debug("[server] %s Digests match, CHAP connection"
+ " successful.\n\n", chap->digest_name);
/*
* One way authentication has succeeded, return now if mutual
* authentication is not enabled.
@@ -317,7 +399,7 @@ static int chap_server_compute_md5(
* Get CHAP_C.
*/
if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
- challenge, &type) < 0) {
+ initiatorchg, &type) < 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
@@ -326,26 +408,28 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
- if (!challenge_len) {
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
- if (challenge_len > 1024) {
+ if (initiatorchg_len > 1024) {
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
- if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ if (hex2bin(initiatorchg_binhex, initiatorchg, initiatorchg_len) < 0) {
pr_err("Malformed CHAP_C\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
* the target.
*/
- if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+ if (initiatorchg_len == chap->challenge_len &&
+ !memcmp(initiatorchg_binhex, chap->challenge,
+ initiatorchg_len)) {
pr_err("initiator CHAP_C matches target CHAP_C, failing"
" login attempt\n");
goto out;
@@ -377,7 +461,7 @@ static int chap_server_compute_md5(
/*
* Convert received challenge to binary hex.
*/
- ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+ ret = crypto_shash_finup(desc, initiatorchg_binhex, initiatorchg_len,
digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for ma challenge\n");
@@ -393,7 +477,7 @@ static int chap_server_compute_md5(
/*
* Convert response from binary hex to ascii hex.
*/
- bin2hex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, chap->digest_size);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
@@ -403,33 +487,15 @@ out:
kzfree(desc);
if (tfm)
crypto_free_shash(tfm);
- kfree(challenge);
- kfree(challenge_binhex);
+ kfree(initiatorchg);
+ kfree(initiatorchg_binhex);
+ kfree(digest);
+ kfree(response);
+ kfree(server_digest);
+ kfree(client_digest);
return auth_ret;
}
-static int chap_got_response(
- struct iscsi_conn *conn,
- struct iscsi_node_auth *auth,
- char *nr_in_ptr,
- char *nr_out_ptr,
- unsigned int *nr_out_len)
-{
- struct iscsi_chap *chap = conn->auth_protocol;
-
- switch (chap->digest_type) {
- case CHAP_DIGEST_MD5:
- if (chap_server_compute_md5(conn, auth, nr_in_ptr,
- nr_out_ptr, nr_out_len) < 0)
- return -1;
- return 0;
- default:
- pr_err("Unknown CHAP digest type %d!\n",
- chap->digest_type);
- return -1;
- }
-}
-
u32 chap_main_loop(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -448,7 +514,7 @@ u32 chap_main_loop(
return 0;
} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
convert_null_to_semi(in_text, *in_len);
- if (chap_got_response(conn, auth, in_text, out_text,
+ if (chap_server_compute_hash(conn, auth, in_text, out_text,
out_len) < 0) {
chap_close(conn);
return 2;
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index d5600ac30b53..fc75c1c20e23 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -6,14 +6,19 @@
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
-#define CHAP_DIGEST_SHA 6
+#define CHAP_DIGEST_SHA1 6
+#define CHAP_DIGEST_SHA256 7
+#define CHAP_DIGEST_SHA3_256 8
-#define CHAP_CHALLENGE_LENGTH 16
+#define MAX_CHAP_CHALLENGE_LEN 32
#define CHAP_CHALLENGE_STR_LEN 4096
-#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_RESPONSE_LENGTH 128 /* sufficient for SHA3 256 */
#define MAX_CHAP_N_SIZE 512
#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+#define SHA1_SIGNATURE_SIZE 20 /* 20 bytes in a SHA1 message digest */
+#define SHA256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA256 message digest */
+#define SHA3_256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA3 256 message digest */
#define CHAP_STAGE_CLIENT_A 1
#define CHAP_STAGE_SERVER_AIC 2
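Since chap_server_open() now ties challenge_len to the digest size, the fixed
challenge[] array must cover the largest supported digest; MAX_CHAP_CHALLENGE_LEN
(32) matches SHA3-256. A compile-time restatement of that invariant (sketch):

    #include <linux/build_bug.h>

    static void __attribute__((unused)) demo_chap_size_check(void)
    {
        /* the largest supported digest must fit the challenge buffer */
        BUILD_BUG_ON(SHA3_256_SIGNATURE_SIZE > MAX_CHAP_CHALLENGE_LEN);
    }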
@@ -28,9 +33,11 @@ extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *,
int *, int *);
struct iscsi_chap {
- unsigned char digest_type;
unsigned char id;
- unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned char challenge[MAX_CHAP_CHALLENGE_LEN];
+ unsigned int challenge_len;
+ unsigned char *digest_name;
+ unsigned int digest_size;
unsigned int authenticate_target;
unsigned int chap_state;
} ____cacheline_aligned;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index daf47f38e081..240c4c4344f6 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -93,9 +93,6 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
-#define X_EXTENSIONKEY "X-com.sbei.version"
-#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
-#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
* Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 3c79411c4cd0..6b4b354c88aa 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -118,7 +118,7 @@ static int srp_get_pr_transport_id(
memset(buf + 8, 0, leading_zero_bytes);
rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
if (rc < 0) {
- pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+ pr_debug("hex2bin failed for %s: %d\n", p, rc);
return rc;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index e5a71addbb06..d24e0a3ba3ff 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -32,9 +32,6 @@
extern struct se_device *g_lun0_dev;
-static DEFINE_SPINLOCK(tpg_lock);
-static LIST_HEAD(tpg_list);
-
/* __core_tpg_get_initiator_node_acl():
*
* mutex_lock(&tpg->acl_node_mutex); must be held when calling
@@ -475,7 +472,6 @@ int core_tpg_register(
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->session_lock);
mutex_init(&se_tpg->tpg_lun_mutex);
@@ -494,10 +490,6 @@ int core_tpg_register(
}
}
- spin_lock_bh(&tpg_lock);
- list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
- spin_unlock_bh(&tpg_lock);
-
pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
@@ -519,10 +511,6 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
- spin_lock_bh(&tpg_lock);
- list_del(&se_tpg->se_tpg_node);
- spin_unlock_bh(&tpg_lock);
-
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7f06a62f8661..ea482d4b1f00 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -584,6 +584,15 @@ void transport_free_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_free_session);
+static int target_release_res(struct se_device *dev, void *data)
+{
+ struct se_session *sess = data;
+
+ if (dev->reservation_holder == sess)
+ target_release_reservation(dev);
+ return 0;
+}
+
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
@@ -600,6 +609,12 @@ void transport_deregister_session(struct se_session *se_sess)
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+ /*
+ * Since the session is being removed, release SPC-2
+ * reservations held by the session that is disappearing.
+ */
+ target_for_each_device(target_release_res, se_sess);
+
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->fabric_name);
/*
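The reservation cleanup above relies on target_for_each_device() to visit every backend device. A plausible shape for that iterator, sketched under the assumption of a global device list protected by a mutex (device_list and device_mutex are assumed names), is:

	/* Assumed shape of the iterator, simplified for illustration. */
	int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
				   void *data)
	{
		struct se_device *dev;
		int ret = 0;

		mutex_lock(&device_mutex);	/* assumed global device lock */
		list_for_each_entry(dev, &device_list, dev_list) {
			ret = fn(dev, data);	/* e.g. target_release_res() */
			if (ret)
				break;
		}
		mutex_unlock(&device_mutex);
		return ret;
	}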
@@ -1243,6 +1258,19 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
return TCM_NO_SENSE;
}
+/**
+ * target_cmd_size_check - Check whether there will be a residual.
+ * @cmd: SCSI command.
+ * @size: Data buffer size derived from CDB. The data buffer size provided by
+ * the SCSI transport driver is available in @cmd->data_length.
+ *
+ * Compare the data buffer size from the CDB with the data buffer limit from the transport
+ * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
+ *
+ * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ *
+ * Return: TCM_NO_SENSE
+ */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
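The comparison the kernel-doc describes reduces to roughly the following (a simplified sketch, not the full function body, which also handles unknown data lengths and rejects mismatched WRITEs):

	if (cmd->data_length > size) {
		/* Transport buffer larger than the CDB uses: underflow. */
		cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		cmd->residual_count = cmd->data_length - size;
		cmd->data_length = size;
	} else if (cmd->data_length < size) {
		/* CDB asks for more than the transport provided: overflow. */
		cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
		cmd->residual_count = size - cmd->data_length;
	}
	return TCM_NO_SENSE;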
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 35be1be87d2a..0b9dfa6b17bc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -499,7 +499,7 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
schedule_delayed_work(&tcmu_unmap_work, 0);
/* try to get new page from the mm */
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
goto err_alloc;
@@ -573,7 +573,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
+ tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
if (!tcmu_cmd)
return NULL;
@@ -584,7 +584,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
- GFP_KERNEL);
+ GFP_NOIO);
if (!tcmu_cmd->dbi) {
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
return NULL;
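These allocations sit in the I/O submission path: under GFP_KERNEL, direct reclaim may itself issue I/O and deadlock against the device being served, which GFP_NOIO forbids. An equivalent scoped form, sketched here purely for illustration, uses memalloc_noio_save()/restore() from <linux/sched/mm.h>:

	/* Illustrative alternative: mark a whole section NOIO. */
	static struct page *alloc_data_page_noio(void)
	{
		unsigned int noio_flags;
		struct page *page;

		noio_flags = memalloc_noio_save(); /* GFP_KERNEL now acts as GFP_NOIO */
		page = alloc_page(GFP_KERNEL);
		memalloc_noio_restore(noio_flags);
		return page;
	}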
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index b9b1e92c6f8d..425c1070de08 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -467,7 +467,6 @@ int target_xcopy_setup_pt(void)
}
memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
- INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 8b1b73065421..98c1aa594e6c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -561,7 +561,7 @@ static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
residue = min(residue, transfer_length);
if (us->srb != NULL)
scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
- (int)residue));
+ residue));
}
if (bcs->Status != US_BULK_STAT_OK)
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 96cb0409dd89..238a8088e17f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1284,8 +1284,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
} else {
residue = min(residue, transfer_length);
- scsi_set_resid(srb, max(scsi_get_resid(srb),
- (int) residue));
+ scsi_set_resid(srb, max(scsi_get_resid(srb), residue));
}
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 475b9c692827..95bba3ba6ac6 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -869,7 +869,6 @@ static struct scsi_host_template uas_host_template = {
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
- .sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
.dma_boundary = PAGE_SIZE - 1,
};
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index b71b5c4f418c..533f56733ba8 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -627,6 +627,7 @@ struct iscsi_reject {
#define ISCSI_REASON_BOOKMARK_INVALID 9
#define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
#define ISCSI_REASON_NEGOTIATION_RESET 11
+#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
/* Max. number of Key=Value pairs in a text message */
#define MAX_KEY_VALUE_PAIRS 8192
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 91bd749a02f7..a2849bb9cd19 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -63,6 +63,7 @@ struct scsi_pointer {
/* for scmd->state */
#define SCMD_STATE_COMPLETE 0
+#define SCMD_STATE_INFLIGHT 1
struct scsi_cmnd {
struct scsi_request req;
@@ -190,12 +191,12 @@ static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
return cmd->sdb.length;
}
-static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
+static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
cmd->req.resid_len = resid;
}
-static inline int scsi_get_resid(struct scsi_cmnd *cmd)
+static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
return cmd->req.resid_len;
}
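With both accessors unsigned, callers such as the USB storage hunks above can drop their (int) casts. A hedged usage sketch (the helper name is invented):

	/* Sketch: a low-level driver reporting a short transfer. */
	static void example_report_residual(struct scsi_cmnd *cmd,
					    unsigned int requested,
					    unsigned int transferred)
	{
		if (transferred < requested)
			scsi_set_resid(cmd, requested - transferred);
		else
			scsi_set_resid(cmd, 0);	/* nothing left over */
	}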
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 202f4d6a4342..3ed836db5306 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -140,8 +140,10 @@ struct scsi_device {
const char * rev; /* ... "nullnullnullnull" before scan */
#define SCSI_VPD_PG_LEN 255
+ struct scsi_vpd __rcu *vpd_pg0;
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
+ struct scsi_vpd __rcu *vpd_pg89;
unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target; /* used only for single_lun */
@@ -199,7 +201,8 @@ struct scsi_device {
unsigned broken_fua:1; /* Don't set FUA bit */
unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
-
+ unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
+ * creation time */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 31e0d6ca1eba..f577647bf5f2 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -23,19 +23,6 @@ struct scsi_host_cmd_pool;
struct scsi_transport_template;
-/*
- * The various choices mean:
- * NONE: Self evident. Host adapter is not capable of scatter-gather.
- * ALL: Means that the host adapter module can do scatter-gather,
- * and that there is no limit to the size of the table to which
- * we scatter/gather data. The value we set here is the maximum
- * single element sglist. To use chained sglists, the adapter
- * has to set a value beyond ALL (and correctly use the chain
- * handling API.
- * Anything else: Indicates the maximum number of chains that can be
- * used in one scatter-gather request.
- */
-#define SG_NONE 0
#define SG_ALL SG_CHUNK_SIZE
#define MODE_UNKNOWN 0x00
@@ -345,7 +332,7 @@ struct scsi_host_template {
/*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme. It is set to the maximum number
- * of simultaneous commands a given host adapter will accept.
+ * of simultaneous commands a single hw queue in the HBA will accept.
*/
int can_queue;
@@ -486,6 +473,9 @@ struct scsi_host_template {
*/
unsigned int cmd_size;
struct scsi_host_cmd_pool *cmd_pool;
+
+ /* Delay for runtime autosuspend */
+ int rpm_autosuspend_delay;
};
/*
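Paired with the per-device rpm_autosuspend bit added to struct scsi_device above, a host driver could opt its devices into runtime autosuspend roughly as follows (a hypothetical example; all names are invented):

	static int example_slave_configure(struct scsi_device *sdev)
	{
		sdev->rpm_autosuspend = 1;	/* enable at device creation time */
		return 0;
	}

	static struct scsi_host_template example_sht = {
		.name			= "example_hba",
		.slave_configure	= example_slave_configure,
		.rpm_autosuspend_delay	= 2000,	/* ms idle before autosuspend */
	};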
@@ -551,7 +541,6 @@ struct Scsi_Host {
/* Area to keep a shared tag map */
struct blk_mq_tag_set tag_set;
- atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
unsigned int host_failed; /* commands that failed.
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7c9716fe731e..1728e883b7b2 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -876,7 +876,6 @@ struct se_portal_group {
/* Spinlock for adding/removing sessions */
spinlock_t session_lock;
struct mutex tpg_lun_mutex;
- struct list_head se_tpg_node;
/* linked list for initiator ACL list */
struct list_head acl_node_list;
struct hlist_head tpg_lun_hlist;
diff --git a/include/uapi/linux/chio.h b/include/uapi/linux/chio.h
index 689fc93fafda..e1cad4c319ee 100644
--- a/include/uapi/linux/chio.h
+++ b/include/uapi/linux/chio.h
@@ -3,6 +3,9 @@
* ioctl interface for the scsi media changer driver
*/
+#ifndef _UAPI_LINUX_CHIO_H
+#define _UAPI_LINUX_CHIO_H
+
/* changer element types */
#define CHET_MT 0 /* media transport element (robot) */
#define CHET_ST 1 /* storage element (media slots) */
@@ -160,10 +163,4 @@ struct changer_set_voltag {
#define CHIOSVOLTAG _IOW('c',18,struct changer_set_voltag)
#define CHIOGVPARAMS _IOR('c',19,struct changer_vendor_params)
-/* ---------------------------------------------------------------------- */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+#endif /* _UAPI_LINUX_CHIO_H */