Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--   drivers/scsi/qla2xxx/qla_attr.c | 111
-rw-r--r--   drivers/scsi/qla2xxx/qla_bsg.c  | 152
-rw-r--r--   drivers/scsi/qla2xxx/qla_bsg.h  |  42
-rw-r--r--   drivers/scsi/qla2xxx/qla_dbg.c  |   6
-rw-r--r--   drivers/scsi/qla2xxx/qla_def.h  |   9
-rw-r--r--   drivers/scsi/qla2xxx/qla_gbl.h  |  18
-rw-r--r--   drivers/scsi/qla2xxx/qla_init.c |  36
-rw-r--r--   drivers/scsi/qla2xxx/qla_isr.c  |   7
-rw-r--r--   drivers/scsi/qla2xxx/qla_mbx.c  | 127
-rw-r--r--   drivers/scsi/qla2xxx/qla_nx.c   | 970
-rw-r--r--   drivers/scsi/qla2xxx/qla_nx.h   | 255
-rw-r--r--   drivers/scsi/qla2xxx/qla_os.c   |  46
12 files changed, 1672 insertions, 107 deletions
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index a31e05f3bfd4..ac326c41e931 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -23,11 +23,23 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; + int rval = 0; if (ha->fw_dump_reading == 0) return 0; - return memory_read_from_buffer(buf, count, &off, ha->fw_dump, + if (IS_QLA82XX(ha)) { + if (off < ha->md_template_size) { + rval = memory_read_from_buffer(buf, count, + &off, ha->md_tmplt_hdr, ha->md_template_size); + return rval; + } + off -= ha->md_template_size; + rval = memory_read_from_buffer(buf, count, + &off, ha->md_dump, ha->md_dump_size); + return rval; + } else + return memory_read_from_buffer(buf, count, &off, ha->fw_dump, ha->fw_dump_len); } @@ -41,12 +53,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; int reading; - if (IS_QLA82XX(ha)) { - ql_dbg(ql_dbg_user, vha, 0x705b, - "Firmware dump not supported for ISP82xx\n"); - return count; - } - if (off != 0) return (0); @@ -59,6 +65,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x705d, "Firmware dump cleared on (%ld).\n", vha->host_no); + if (IS_QLA82XX(vha->hw)) { + qla82xx_md_free(vha); + qla82xx_md_prep(vha); + } ha->fw_dump_reading = 0; ha->fw_dumped = 0; break; @@ -75,10 +85,29 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, qla2x00_alloc_fw_dump(vha); break; case 3: - qla2x00_system_error(vha); + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + } else + qla2x00_system_error(vha); + break; + case 4: + if (IS_QLA82XX(ha)) { + if (ha->md_tmplt_hdr) + ql_dbg(ql_dbg_user, vha, 0x705b, + "MiniDump supported with this firmware.\n"); + else + ql_dbg(ql_dbg_user, vha, 0x709d, + "MiniDump not supported with this firmware.\n"); + } + break; + case 5: + if (IS_QLA82XX(ha)) + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } - return (count); + return -EINVAL; } static struct bin_attribute sysfs_fw_dump_attr = { @@ -122,7 +151,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || !ha->isp_ops->write_nvram) - return 0; + return -EINVAL; /* Checksum NVRAM. */ if (IS_FWI2_CAPABLE(ha)) { @@ -165,7 +194,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); - return (count); + return count; } static struct bin_attribute sysfs_nvram_attr = { @@ -239,10 +268,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, int val, valid; if (off) - return 0; + return -EINVAL; if (unlikely(pci_channel_offline(ha->pdev))) - return 0; + return -EAGAIN; if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) return -EINVAL; @@ -253,7 +282,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, case 0: if (ha->optrom_state != QLA_SREADING && ha->optrom_state != QLA_SWRITING) - break; + return -EINVAL; ha->optrom_state = QLA_SWAITING; @@ -266,7 +295,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, break; case 1: if (ha->optrom_state != QLA_SWAITING) - break; + return -EINVAL; ha->optrom_region_start = start; ha->optrom_region_size = start + size > ha->optrom_size ? 
@@ -280,7 +309,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "(%x).\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; - return count; + return -ENOMEM; } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { @@ -299,7 +328,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, break; case 2: if (ha->optrom_state != QLA_SWAITING) - break; + return -EINVAL; /* * We need to be more restrictive on which FLASH regions are @@ -347,7 +376,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "(%x)\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; - return count; + return -ENOMEM; } ql_dbg(ql_dbg_user, vha, 0x7067, @@ -358,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, break; case 3: if (ha->optrom_state != QLA_SWRITING) - break; + return -ENOMEM; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7068, @@ -374,7 +403,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_region_start, ha->optrom_region_size); break; default: - count = -EINVAL; + return -EINVAL; } return count; } @@ -398,10 +427,10 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; if (unlikely(pci_channel_offline(ha->pdev))) - return 0; + return -EAGAIN; if (!capable(CAP_SYS_ADMIN)) - return 0; + return -EINVAL; if (IS_NOCACHE_VPD_TYPE(ha)) ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, @@ -438,17 +467,17 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, /* Update flash version information for 4Gb & above. */ if (!IS_FWI2_CAPABLE(ha)) - goto done; + return -EINVAL; tmp_data = vmalloc(256); if (!tmp_data) { ql_log(ql_log_warn, vha, 0x706b, "Unable to allocate memory for VPD information update.\n"); - goto done; + return -ENOMEM; } ha->isp_ops->get_flash_version(vha, tmp_data); vfree(tmp_data); -done: + return count; } @@ -505,8 +534,7 @@ do_read: "Unable to read SFP data (%x/%x/%x).\n", rval, addr, offset); - count = 0; - break; + return -EIO; } memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE); buf += SFP_BLOCK_SIZE; @@ -536,7 +564,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, int type; if (off != 0) - return 0; + return -EINVAL; type = simple_strtol(buf, NULL, 10); switch (type) { @@ -546,13 +574,18 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, scsi_block_requests(vha->host); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + } qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); break; case 0x2025d: if (!IS_QLA81XX(ha)) - break; + return -EPERM; ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); @@ -571,7 +604,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, if (!IS_QLA82XX(ha) || vha != base_vha) { ql_log(ql_log_info, vha, 0x7071, "FCoE ctx reset no supported.\n"); - return count; + return -EPERM; } ql_log(ql_log_info, vha, 0x7072, @@ -607,7 +640,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, ha->edc_data_len = 0; if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) - return 0; + return -EINVAL; if (!ha->edc_data) { ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, @@ -615,7 +648,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, if (!ha->edc_data) { ql_log(ql_log_warn, vha, 0x7073, 
"Unable to allocate memory for EDC write.\n"); - return 0; + return -ENOMEM; } } @@ -634,9 +667,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, dev, adr, len, opt); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7074, - "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n", + "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n", rval, dev, adr, opt, len, buf[8]); - return 0; + return -EIO; } return count; @@ -665,7 +698,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, ha->edc_data_len = 0; if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) - return 0; + return -EINVAL; if (!ha->edc_data) { ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, @@ -673,7 +706,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, if (!ha->edc_data) { ql_log(ql_log_warn, vha, 0x708c, "Unable to allocate memory for EDC status.\n"); - return 0; + return -ENOMEM; } } @@ -693,7 +726,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x7075, "Unable to write EDC status (%x) %02x:%04x:%02x.\n", rval, dev, adr, opt, len); - return 0; + return -EIO; } ha->edc_data_len = len; @@ -805,7 +838,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, if (!ha->dcbx_tlv) { ql_log(ql_log_warn, vha, 0x7078, "Unable to allocate memory for DCBX TLV read-data.\n"); - return 0; + return -ENOMEM; } do_read: @@ -817,7 +850,7 @@ do_read: if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7079, "Unable to read DCBX TLV (%x).\n", rval); - count = 0; + return -EIO; } memcpy(buf, ha->dcbx_tlv, count); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 07d1767cd26b..8b641a8a0c74 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -704,6 +704,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; if ((ha->current_topology == ISP_CFG_F || + (atomic_read(&vha->loop_state) == LOOP_DOWN) || (IS_QLA81XX(ha) && le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && @@ -1447,6 +1448,148 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) } static int +qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_image_version_list *list = (void *)bsg; + struct qla_image_version *image; + uint32_t count; + dma_addr_t sfp_dma; + void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + if (!sfp) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); + + image = list->version; + count = list->count; + while (count--) { + memcpy(sfp, &image->field_info, sizeof(image->field_info)); + rval = qla2x00_write_sfp(vha, sfp_dma, sfp, + image->field_address.device, image->field_address.offset, + sizeof(image->field_info), image->field_address.option); + if (rval) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + image++; + } + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 
16; + bsg_job->job_done(bsg_job); + + return 0; +} + +static int +qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_status_reg *sr = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + if (!sfp) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); + + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, + sr->field_address.device, sr->field_address.offset, + sizeof(sr->status_reg), sr->field_address.option); + sr->status_reg = *sfp; + + if (rval) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->reply_payload_rcv_len = sizeof(*sr); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + + return 0; +} + +static int +qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_status_reg *sr = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + if (!sfp) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); + + *sfp = sr->status_reg; + rval = qla2x00_write_sfp(vha, sfp_dma, sfp, + sr->field_address.device, sr->field_address.offset, + sizeof(sr->status_reg), sr->field_address.option); + + if (rval) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + + return 0; +} + +static int qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) { switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { @@ -1474,6 +1617,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) case QL_VND_UPDATE_FLASH: return qla2x00_update_optrom(bsg_job); + case QL_VND_SET_FRU_VERSION: + return qla2x00_update_fru_versions(bsg_job); + + case QL_VND_READ_FRU_STATUS: + return qla2x00_read_fru_status(bsg_job); + + case QL_VND_WRITE_FRU_STATUS: + return qla2x00_write_fru_status(bsg_job); + default: bsg_job->reply->result = (DID_ERROR << 16); bsg_job->job_done(bsg_job); diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 0f0f54e35f06..70caa63a8930 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -16,6 +16,16 @@ #define QL_VND_FCP_PRIO_CFG_CMD 0x06 #define QL_VND_READ_FLASH 0x07 #define QL_VND_UPDATE_FLASH 0x08 +#define QL_VND_SET_FRU_VERSION 0x0B +#define QL_VND_READ_FRU_STATUS 0x0C +#define 
QL_VND_WRITE_FRU_STATUS 0x0D + +/* BSG Vendor specific subcode returns */ +#define EXT_STATUS_OK 0 +#define EXT_STATUS_ERR 1 +#define EXT_STATUS_INVALID_PARAM 6 +#define EXT_STATUS_MAILBOX 11 +#define EXT_STATUS_NO_MEMORY 17 /* BSG definations for interpreting CommandSent field */ #define INT_DEF_LB_LOOPBACK_CMD 0 @@ -141,4 +151,36 @@ struct qla_port_param { uint16_t mode; uint16_t speed; } __attribute__ ((packed)); + + +/* FRU VPD */ + +#define MAX_FRU_SIZE 36 + +struct qla_field_address { + uint16_t offset; + uint16_t device; + uint16_t option; +} __packed; + +struct qla_field_info { + uint8_t version[MAX_FRU_SIZE]; +} __packed; + +struct qla_image_version { + struct qla_field_address field_address; + struct qla_field_info field_info; +} __packed; + +struct qla_image_version_list { + uint32_t count; + struct qla_image_version version[0]; +} __packed; + +struct qla_status_reg { + struct qla_field_address field_address; + uint8_t status_reg; + uint8_t reserved[7]; +} __packed; + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index d79cd8a5f831..9df4787715c0 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -12,7 +12,7 @@ * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- * | Module Init and Probe | 0x0116 | | - * | Mailbox commands | 0x1126 | | + * | Mailbox commands | 0x1129 | | * | Device Discovery | 0x2083 | | * | Queue Command and IO tracing | 0x302e | 0x3008 | * | DPC Thread | 0x401c | | @@ -22,7 +22,7 @@ * | Task Management | 0x8041 | | * | AER/EEH | 0x900f | | * | Virtual Port | 0xa007 | | - * | ISP82XX Specific | 0xb04f | | + * | ISP82XX Specific | 0xb051 | | * | MultiQ | 0xc00b | | * | Misc | 0xd00b | | * ---------------------------------------------------------------------- @@ -403,7 +403,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) return ptr + sizeof(struct qla2xxx_mq_chain); } -static void +void qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) { struct qla_hw_data *ha = vha->hw; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index a03eaf40f377..fcf052c50bf5 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2438,7 +2438,8 @@ struct qla_hw_data { uint32_t quiesce_owner:1; uint32_t thermal_supported:1; uint32_t isp82xx_reset_hdlr_active:1; - /* 26 bits */ + uint32_t isp82xx_reset_owner:1; + /* 28 bits */ } flags; /* This spinlock is used to protect "io transactions", you must @@ -2822,6 +2823,12 @@ struct qla_hw_data { uint8_t fw_type; __le32 file_prd_off; /* File firmware product offset */ + + uint32_t md_template_size; + void *md_tmplt_hdr; + dma_addr_t md_tmplt_hdr_dma; + void *md_dump; + uint32_t md_dump_size; }; /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 29b1a3e28231..ce32d8135c9e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -104,6 +104,8 @@ extern int ql2xenablehba_err_chk; extern int ql2xtargetreset; extern int ql2xdontresethba; extern unsigned int ql2xmaxlun; +extern int ql2xmdcapmask; +extern int ql2xmdenable; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -407,6 +409,8 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *); extern int qla24xx_beacon_on(struct scsi_qla_host *); extern int qla24xx_beacon_off(struct scsi_qla_host *); extern void qla24xx_beacon_blink(struct scsi_qla_host 
*); +extern int qla82xx_beacon_on(struct scsi_qla_host *); +extern int qla82xx_beacon_off(struct scsi_qla_host *); extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); @@ -442,6 +446,7 @@ extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, uint8_t *, uint32_t); +extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); /* * Global Function Prototypes in qla_gs.c source file. @@ -569,7 +574,10 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); extern void qla82xx_start_iocbs(srb_t *); extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); +extern int qla82xx_check_md_needed(scsi_qla_host_t *); extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); +extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); +extern char *qdev_state(uint32_t); /* BSG related functions */ extern int qla24xx_bsg_request(struct fc_bsg_job *); @@ -579,4 +587,14 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t, uint32_t); extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t *, uint16_t *); + +/* Minidump related functions */ +extern int qla82xx_md_get_template_size(scsi_qla_host_t *); +extern int qla82xx_md_get_template(scsi_qla_host_t *); +extern int qla82xx_md_alloc(scsi_qla_host_t *); +extern void qla82xx_md_free(scsi_qla_host_t *); +extern int qla82xx_md_collect(scsi_qla_host_t *); +extern void qla82xx_md_prep(scsi_qla_host_t *); +extern void qla82xx_set_reset_owner(scsi_qla_host_t *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 37da04d3db26..f03e915f1877 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1480,13 +1480,19 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS) { enable_82xx_npiv: fw_major_version = ha->fw_major_version; - rval = qla2x00_get_fw_version(vha, - &ha->fw_major_version, - &ha->fw_minor_version, - &ha->fw_subminor_version, - &ha->fw_attributes, &ha->fw_memory_size, - ha->mpi_version, &ha->mpi_capabilities, - ha->phy_version); + if (IS_QLA82XX(ha)) + qla82xx_check_md_needed(vha); + else { + rval = qla2x00_get_fw_version(vha, + &ha->fw_major_version, + &ha->fw_minor_version, + &ha->fw_subminor_version, + &ha->fw_attributes, + &ha->fw_memory_size, + ha->mpi_version, + &ha->mpi_capabilities, + ha->phy_version); + } if (rval != QLA_SUCCESS) goto failed; ha->flags.npiv_supported = 0; @@ -1503,10 +1509,8 @@ enable_82xx_npiv: &ha->fw_xcb_count, NULL, NULL, &ha->max_npiv_vports, NULL); - if (!fw_major_version && ql2xallocfwdump) { - if (!IS_QLA82XX(ha)) - qla2x00_alloc_fw_dump(vha); - } + if (!fw_major_version && ql2xallocfwdump) + qla2x00_alloc_fw_dump(vha); } } else { ql_log(ql_log_fatal, vha, 0x00cd, @@ -1924,7 +1928,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) rval = qla84xx_init_chip(vha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, - vha, 0x8043, + vha, 0x8026, "Init chip failed.\n"); break; } @@ -1933,7 +1937,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) cs84xx_time = jiffies - cs84xx_time; wtime += cs84xx_time; mtime += cs84xx_time; - ql_dbg(ql_dbg_taskm, vha, 0x8042, + ql_dbg(ql_dbg_taskm, vha, 0x8025, "Increasing wait time by %ld. 
" "New time %ld.\n", cs84xx_time, wtime); @@ -5443,11 +5447,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); /* Update the firmware version */ - qla2x00_get_fw_version(vha, &ha->fw_major_version, - &ha->fw_minor_version, &ha->fw_subminor_version, - &ha->fw_attributes, &ha->fw_memory_size, - ha->mpi_version, &ha->mpi_capabilities, - ha->phy_version); + status = qla82xx_check_md_needed(vha); if (ha->fce) { ha->flags.fce_enabled = 1; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 8a7591f035e6..2516adf1aeea 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2060,6 +2060,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, case ELS_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; + case MARKER_TYPE: + /* Do nothing in this case, this check is to prevent it + * from falling into default case + */ + break; default: /* Type Not Supported. */ ql_dbg(ql_dbg_async, vha, 0x5042, @@ -2274,7 +2279,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) ha = rsp->hw; /* Clear the interrupt, if enabled, for this response queue */ - if (rsp->options & ~BIT_6) { + if (!ha->flags.disable_msix_handshake) { reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f7604ea1af83..3b3cec9f6ac2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -4186,3 +4186,130 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) return rval; } + +int +qla82xx_md_get_template_size(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT_SIZE); + mcp->mb[3] = MSW(RQST_TMPLT_SIZE); + + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Always copy back return mailbox values. 
*/ + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1120, + "mailbox command FAILED=0x%x, subcode=%x.\n", + (mcp->mb[1] << 16) | mcp->mb[0], + (mcp->mb[3] << 16) | mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__); + ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); + if (!ha->md_template_size) { + ql_dbg(ql_dbg_mbx, vha, 0x1122, + "Null template size obtained.\n"); + rval = QLA_FUNCTION_FAILED; + } + } + return rval; +} + +int +qla82xx_md_get_template(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__); + + ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, + ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); + if (!ha->md_tmplt_hdr) { + ql_log(ql_log_warn, vha, 0x1124, + "Unable to allocate memory for Minidump template.\n"); + return rval; + } + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT); + mcp->mb[3] = MSW(RQST_TMPLT); + mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); + mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); + mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); + mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); + mcp->mb[8] = LSW(ha->md_template_size); + mcp->mb[9] = MSW(ha->md_template_size); + + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1125, + "mailbox command FAILED=0x%x, subcode=%x.\n", + ((mcp->mb[1] << 16) | mcp->mb[0]), + ((mcp->mb[3] << 16) | mcp->mb[2])); + } else + ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__); + return rval; +} + +int +qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA82XX(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x1127, + "Entered %s.\n", __func__); + + memset(mcp, 0, sizeof(mbx_cmd_t)); + mcp->mb[0] = MBC_SET_LED_CONFIG; + if (enable) + mcp->mb[7] = 0xE; + else + mcp->mb[7] = 0xD; + + mcp->out_mb = MBX_7|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 30; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1128, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1129, + "Done %s.\n", __func__); + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 049807cda419..94bded5ddce4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -7,6 +7,8 @@ #include "qla_def.h" #include <linux/delay.h> #include <linux/pci.h> +#include <linux/ratelimit.h> +#include <linux/vmalloc.h> #include <scsi/scsi_tcq.h> #define MASK(n) ((1ULL<<(n))-1) @@ -328,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = { }; /* Device states */ -char *qdev_state[] = { +char *q_dev_state[] = { "Unknown", "Cold", "Initializing", @@ -339,6 +341,11 @@ char *qdev_state[] = { "Quiescent", }; +char *qdev_state(uint32_t dev_state) +{ + return q_dev_state[dev_state]; +} + /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr @@ -2355,9 +2362,13 @@ 
qla82xx_need_reset(struct qla_hw_data *ha) uint32_t drv_state; int rval; - drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); - rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); - return rval; + if (ha->flags.isp82xx_reset_owner) + return 1; + else { + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); + return rval; + } } static inline void @@ -2374,8 +2385,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha) drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); } drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); - ql_log(ql_log_info, vha, 0x00bb, - "drv_state = 0x%x.\n", drv_state); + ql_dbg(ql_dbg_init, vha, 0x00bb, + "drv_state = 0x%08x.\n", drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } @@ -2598,7 +2609,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - *dsd_seg++ = cpu_to_le32(dsd_list_len); + cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len); } else { *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); @@ -3529,6 +3540,7 @@ static void qla82xx_need_reset_handler(scsi_qla_host_t *vha) { uint32_t dev_state, drv_state, drv_active; + uint32_t active_mask = 0; unsigned long reset_timeout; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; @@ -3541,15 +3553,32 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) qla82xx_idc_lock(ha); } - qla82xx_set_rst_ready(ha); + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + if (!ha->flags.isp82xx_reset_owner) { + ql_dbg(ql_dbg_p3p, vha, 0xb028, + "reset_acknowledged by 0x%x\n", ha->portnum); + qla82xx_set_rst_ready(ha); + } else { + active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); + drv_active &= active_mask; + ql_dbg(ql_dbg_p3p, vha, 0xb029, + "active_mask: 0x%08x\n", active_mask); + } /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - while (drv_state != drv_active) { + ql_dbg(ql_dbg_p3p, vha, 0xb02a, + "drv_state: 0x%08x, drv_active: 0x%08x, " + "dev_state: 0x%08x, active_mask: 0x%08x\n", + drv_state, drv_active, dev_state, active_mask); + + while (drv_state != drv_active && + dev_state != QLA82XX_DEV_INITIALIZING) { if (time_after_eq(jiffies, reset_timeout)) { ql_log(ql_log_warn, vha, 0x00b5, "Reset timeout.\n"); @@ -3560,23 +3589,87 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + if (ha->flags.isp82xx_reset_owner) + drv_active &= active_mask; + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); } - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + ql_dbg(ql_dbg_p3p, vha, 0xb02b, + "drv_state: 0x%08x, drv_active: 0x%08x, " + "dev_state: 0x%08x, active_mask: 0x%08x\n", + drv_state, drv_active, dev_state, active_mask); + ql_log(ql_log_info, vha, 0x00b6, "Device state is 0x%x = %s.\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); /* Force to DEV_COLD unless someone else is starting a reset */ - if (dev_state != QLA82XX_DEV_INITIALIZING) { + if (dev_state != QLA82XX_DEV_INITIALIZING && + dev_state != QLA82XX_DEV_COLD) { ql_log(ql_log_info, vha, 0x00b7, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); + if (ql2xmdenable) { + if (qla82xx_md_collect(vha)) + ql_log(ql_log_warn, vha, 0xb02c, + "Not able to collect minidump.\n"); + } else + ql_log(ql_log_warn, vha, 0xb04f, + "Minidump disabled.\n"); } } int +qla82xx_check_md_needed(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t fw_major_version, fw_minor_version, fw_subminor_version; + int rval = QLA_SUCCESS; + + fw_major_version = ha->fw_major_version; + fw_minor_version = ha->fw_minor_version; + fw_subminor_version = ha->fw_subminor_version; + + rval = qla2x00_get_fw_version(vha, &ha->fw_major_version, + &ha->fw_minor_version, &ha->fw_subminor_version, + &ha->fw_attributes, &ha->fw_memory_size, + ha->mpi_version, &ha->mpi_capabilities, + ha->phy_version); + + if (rval != QLA_SUCCESS) + return rval; + + if (ql2xmdenable) { + if (!ha->fw_dumped) { + if (fw_major_version != ha->fw_major_version || + fw_minor_version != ha->fw_minor_version || + fw_subminor_version != ha->fw_subminor_version) { + + ql_log(ql_log_info, vha, 0xb02d, + "Firmware version differs " + "Previous version: %d:%d:%d - " + "New version: %d:%d:%d\n", + ha->fw_major_version, + ha->fw_minor_version, + ha->fw_subminor_version, + fw_major_version, fw_minor_version, + fw_subminor_version); + /* Release MiniDump resources */ + qla82xx_md_free(vha); + /* ALlocate MiniDump resources */ + qla82xx_md_prep(vha); + } else + ql_log(ql_log_info, vha, 0xb02e, + "Firmware dump available to retrieve\n", + vha->host_no); + } + } + return rval; +} + + +int qla82xx_check_fw_alive(scsi_qla_host_t *vha) { uint32_t fw_heartbeat_counter; @@ -3637,7 +3730,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0x009b, "Device state is 0x%x = %s.\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); @@ -3659,26 +3752,33 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0x009d, "Device state is 0x%x = %s.\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : + dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); } switch (dev_state) { case QLA82XX_DEV_READY: + qla82xx_check_md_needed(vha); + ha->flags.isp82xx_reset_owner = 0; goto exit; case QLA82XX_DEV_COLD: rval = qla82xx_device_bootstrap(vha); - goto exit; + break; case QLA82XX_DEV_INITIALIZING: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); break; case QLA82XX_DEV_NEED_RESET: - if (!ql2xdontresethba) - qla82xx_need_reset_handler(vha); + if (!ql2xdontresethba) + qla82xx_need_reset_handler(vha); + else { + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + } dev_init_timeout = jiffies + - (ha->nx_dev_init_timeout * HZ); + (ha->nx_dev_init_timeout * HZ); break; case QLA82XX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); @@ -3791,6 +3891,28 @@ int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) return rval; } +void +qla82xx_set_reset_owner(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t dev_state; + + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + if (dev_state == QLA82XX_DEV_READY) { + ql_log(ql_log_info, vha, 0xb02f, + "HW State: NEED RESET\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_NEED_RESET); + ha->flags.isp82xx_reset_owner = 1; + ql_dbg(ql_dbg_p3p, vha, 0xb030, + "reset_owner is 0x%x\n", ha->portnum); + } else + ql_log(ql_log_info, vha, 0xb031, + "Device state is 0x%x = %s.\n", + dev_state, + dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); +} + /* * qla82xx_abort_isp * Resets ISP and aborts all outstanding commands. @@ -3806,7 +3928,6 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; - uint32_t dev_state; if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x8024, @@ -3816,16 +3937,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) ha->flags.isp82xx_reset_hdlr_active = 1; qla82xx_idc_lock(ha); - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - if (dev_state == QLA82XX_DEV_READY) { - ql_log(ql_log_info, vha, 0x8025, - "HW State: NEED RESET.\n"); - qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA82XX_DEV_NEED_RESET); - } else - ql_log(ql_log_info, vha, 0x8026, - "Hw State: %s.\n", dev_state < MAX_STATES ? 
- qdev_state[dev_state] : "Unknown"); + qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); rval = qla82xx_device_state_handler(vha); @@ -4016,3 +4128,803 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) } } } + +/* Minidump related functions */ +int +qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) +{ + uint32_t off_value, rval = 0; + + WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase), + (off & 0xFFFF0000)); + + /* Read back value to make sure write has gone through */ + RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); + off_value = (off & 0x0000FFFF); + + if (flag) + WRT_REG_DWORD((void *) + (off_value + CRB_INDIRECT_2M + ha->nx_pcibase), + data); + else + rval = RD_REG_DWORD((void *) + (off_value + CRB_INDIRECT_2M + ha->nx_pcibase)); + + return rval; +} + +static int +qla82xx_minidump_process_control(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + struct qla82xx_md_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time; + uint32_t addr, index, crb_addr; + unsigned long wtime; + struct qla82xx_md_template_hdr *tmplt_hdr; + uint32_t rval = QLA_SUCCESS; + int i; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; + crb_addr = crb_entry->addr; + + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + if (opcode & QLA82XX_DBG_OPCODE_WR) { + qla82xx_md_rw_32(ha, crb_addr, + crb_entry->value_1, 1); + opcode &= ~QLA82XX_DBG_OPCODE_WR; + } + + if (opcode & QLA82XX_DBG_OPCODE_RW) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_RW; + } + + if (opcode & QLA82XX_DBG_OPCODE_AND) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + read_value &= crb_entry->value_2; + opcode &= ~QLA82XX_DBG_OPCODE_AND; + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + } + + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + read_value |= crb_entry->value_3; + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + + if (opcode & QLA82XX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + + do { + if ((read_value & crb_entry->value_2) + == crb_entry->value_1) + break; + else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_FUNCTION_FAILED; + break; + } else + read_value = qla82xx_md_rw_32(ha, + crb_addr, 0, 0); + } while (1); + opcode &= ~QLA82XX_DBG_OPCODE_POLL; + } + + if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else + addr = crb_addr; + + read_value = qla82xx_md_rw_32(ha, addr, 0, 0); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else + addr = crb_addr; + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + 
tmplt_hdr->saved_state_array[index]; + } else + read_value = crb_entry->value_1; + + qla82xx_md_rw_32(ha, addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; + } + crb_addr += crb_entry->crb_strd.addr_stride; + } + return rval; +} + +static void +qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_rdocm *ocm_hdr; + uint32_t *data_ptr = *d_ptr; + + ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase)); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla82xx_md_entry_mux *mux_hdr; + uint32_t *data_ptr = *d_ptr; + + mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, s_addr, s_value, 1); + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(s_value); + *data_ptr++ = cpu_to_le32(r_value); + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_crb *crb_hdr; + uint32_t *data_ptr = *d_ptr; + + crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; + r_stride = crb_hdr->crb_strd.addr_stride; + loop_cnt = crb_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_addr); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static int +qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time, p_mask; + uint32_t c_value_w, c_value_r; + struct qla82xx_md_entry_cache *cache_hdr; + int rval = QLA_FUNCTION_FAILED; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = 
cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); + if (c_value_w) + qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); + if ((c_value_r & p_mask) == 0) + break; + else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + ql_dbg(ql_dbg_p3p, vha, 0xb032, + "c_value_r: 0x%x, poll_mask: 0x%lx, " + "w_time: 0x%lx\n", + c_value_r, p_mask, w_time); + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static void +qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla82xx_md_entry_cache *cache_hdr; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); + qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_queue(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla82xx_md_entry_queue *q_hdr; + uint32_t *data_ptr = *d_ptr; + + q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, s_addr, qid, 1); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value; + uint32_t i, loop_cnt; + struct qla82xx_md_entry_rdrom *rom_hdr; + uint32_t *data_ptr = *d_ptr; + + rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; + r_addr = rom_hdr->read_addr; + loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, + (r_addr & 0xFFFF0000), 1); + r_value = qla82xx_md_rw_32(ha, + MD_DIRECT_ROM_READ_BASE + + (r_addr & 0x0000FFFF), 0, 0); + 
*data_ptr++ = cpu_to_le32(r_value); + r_addr += sizeof(uint32_t); + } + *d_ptr = data_ptr; +} + +static int +qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value, r_data; + uint32_t i, j, loop_cnt; + struct qla82xx_md_entry_rdmem *m_hdr; + unsigned long flags; + int rval = QLA_FUNCTION_FAILED; + uint32_t *data_ptr = *d_ptr; + + m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; + loop_cnt = m_hdr->read_data_size/16; + + if (r_addr & 0xf) { + ql_log(ql_log_warn, vha, 0xb033, + "Read addr 0x%x not 16 bytes alligned\n", r_addr); + return rval; + } + + if (m_hdr->read_data_size % 16) { + ql_log(ql_log_warn, vha, 0xb034, + "Read data[0x%x] not multiple of 16 bytes\n", + m_hdr->read_data_size); + return rval; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb035, + "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size, loop_cnt); + + write_lock_irqsave(&ha->hw_lock, flags); + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); + r_value = 0; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); + r_value = MIU_TA_CTL_ENABLE; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + r_value = qla82xx_md_rw_32(ha, + MD_MIU_TEST_AGT_CTRL, 0, 0); + if ((r_value & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR + "failed to read through agent\n"); + write_unlock_irqrestore(&ha->hw_lock, flags); + return rval; + } + + for (j = 0; j < 4; j++) { + r_data = qla82xx_md_rw_32(ha, + MD_MIU_TEST_AGT_RDDATA[j], 0, 0); + *data_ptr++ = cpu_to_le32(r_data); + } + r_addr += 16; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static int +qla82xx_validate_template_chksum(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint64_t chksum = 0; + uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; + int count = ha->md_template_size/sizeof(uint32_t); + + while (count-- > 0) + chksum += *d_ptr++; + while (chksum >> 32) + chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32); + return ~chksum; +} + +static void +qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; + ql_dbg(ql_dbg_p3p, vha, 0xb036, + "Skipping entry[%d]: " + "ETYPE[0x%x]-ELEVEL[0x%x]\n", + index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); +} + +int +qla82xx_md_collect(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int no_entry_hdr = 0; + qla82xx_md_entry_hdr_t *entry_hdr; + struct qla82xx_md_template_hdr *tmplt_hdr; + uint32_t *data_ptr; + uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; + int i = 0, rval = QLA_FUNCTION_FAILED; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + data_ptr = (uint32_t *)ha->md_dump; + + if (ha->fw_dumped) { + ql_log(ql_log_info, vha, 0xb037, + "Firmware dump available to retrive\n"); + goto md_failed; + } + + ha->fw_dumped = 0; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb038, + "Memory not allocated for minidump capture\n"); + goto md_failed; + } + + if (qla82xx_validate_template_chksum(vha)) { + ql_log(ql_log_info, vha, 0xb039, + 
"Template checksum validation error\n"); + goto md_failed; + } + + no_entry_hdr = tmplt_hdr->num_of_entries; + ql_dbg(ql_dbg_p3p, vha, 0xb03a, + "No of entry headers in Template: 0x%x\n", no_entry_hdr); + + ql_dbg(ql_dbg_p3p, vha, 0xb03b, + "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); + + f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; + + /* Validate whether required debug level is set */ + if ((f_capture_mask & 0x3) != 0x3) { + ql_log(ql_log_warn, vha, 0xb03c, + "Minimum required capture mask[0x%x] level not set\n", + f_capture_mask); + goto md_failed; + } + tmplt_hdr->driver_capture_mask = ql2xmdcapmask; + + tmplt_hdr->driver_info[0] = vha->host_no; + tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | + (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | + QLA_DRIVER_BETA_VER; + + total_data_size = ha->md_dump_size; + + ql_dbg(ql_log_info, vha, 0xb03d, + "Total minidump data_size 0x%x to be captured\n", total_data_size); + + /* Check whether template obtained is valid */ + if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { + ql_log(ql_log_warn, vha, 0xb04e, + "Bad template header entry type: 0x%x obtained\n", + tmplt_hdr->entry_type); + goto md_failed; + } + + entry_hdr = (qla82xx_md_entry_hdr_t *) \ + (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); + + /* Walk through the entry headers */ + for (i = 0; i < no_entry_hdr; i++) { + + if (data_collected > total_data_size) { + ql_log(ql_log_warn, vha, 0xb03e, + "More MiniDump data collected: [0x%x]\n", + data_collected); + goto md_failed; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ql2xmdcapmask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA82XX_DBG_SKIPPED_FLAG; + ql_dbg(ql_dbg_p3p, vha, 0xb03f, + "Skipping entry[%d]: " + "ETYPE[0x%x]-ELEVEL[0x%x]\n", + i, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); + goto skip_nxt_entry; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb040, + "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" + "entry_type: 0x%x, captrue_mask: 0x%x\n", + __func__, i, data_ptr, entry_hdr, + entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); + + ql_dbg(ql_dbg_p3p, vha, 0xb041, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, (ha->md_dump_size - data_collected)); + + /* Decode the entry type and take + * required action to capture debug data */ + switch (entry_hdr->entry_type) { + case QLA82XX_RDEND: + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_CNTRL: + rval = qla82xx_minidump_process_control(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_RDCRB: + qla82xx_minidump_process_rdcrb(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMEM: + rval = qla82xx_minidump_process_rdmem(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_BOARD: + case QLA82XX_RDROM: + qla82xx_minidump_process_rdrom(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_L2DTG: + case QLA82XX_L2ITG: + case QLA82XX_L2DAT: + case QLA82XX_L2INS: + rval = qla82xx_minidump_process_l2tag(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_L1DAT: + case QLA82XX_L1INS: + qla82xx_minidump_process_l1cache(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDOCM: + qla82xx_minidump_process_rdocm(vha, + entry_hdr, &data_ptr); + 
break; + case QLA82XX_RDMUX: + qla82xx_minidump_process_rdmux(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_QUEUE: + qla82xx_minidump_process_queue(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDNOP: + default: + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + break; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb042, + "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); + + data_collected = (uint8_t *)data_ptr - + (uint8_t *)ha->md_dump; +skip_nxt_entry: + entry_hdr = (qla82xx_md_entry_hdr_t *) \ + (((uint8_t *)entry_hdr) + entry_hdr->entry_size); + } + + if (data_collected != total_data_size) { + ql_dbg(ql_log_warn, vha, 0xb043, + "MiniDump data mismatch: Data collected: [0x%x]," + "total_data_size:[0x%x]\n", + data_collected, total_data_size); + goto md_failed; + } + + ql_log(ql_log_info, vha, 0xb044, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); + ha->fw_dumped = 1; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + +md_failed: + return rval; +} + +int +qla82xx_md_alloc(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int i, k; + struct qla82xx_md_template_hdr *tmplt_hdr; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + + if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { + ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; + ql_log(ql_log_info, vha, 0xb045, + "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", + ql2xmdcapmask); + } + + for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { + if (i & ql2xmdcapmask) + ha->md_dump_size += tmplt_hdr->capture_size_array[k]; + } + + if (ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb046, + "Firmware dump previously allocated.\n"); + return 1; + } + + ha->md_dump = vmalloc(ha->md_dump_size); + if (ha->md_dump == NULL) { + ql_log(ql_log_warn, vha, 0xb047, + "Unable to allocate memory for Minidump size " + "(0x%x).\n", ha->md_dump_size); + return 1; + } + return 0; +} + +void +qla82xx_md_free(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + /* Release the template header allocated */ + if (ha->md_tmplt_hdr) { + ql_log(ql_log_info, vha, 0xb048, + "Free MiniDump template: %p, size (%d KB)\n", + ha->md_tmplt_hdr, ha->md_template_size / 1024); + dma_free_coherent(&ha->pdev->dev, ha->md_template_size, + ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); + ha->md_tmplt_hdr = 0; + } + + /* Release the template data buffer allocated */ + if (ha->md_dump) { + ql_log(ql_log_info, vha, 0xb049, + "Free MiniDump memory: %p, size (%d KB)\n", + ha->md_dump, ha->md_dump_size / 1024); + vfree(ha->md_dump); + ha->md_dump_size = 0; + ha->md_dump = 0; + } +} + +void +qla82xx_md_prep(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval; + + /* Get Minidump template size */ + rval = qla82xx_md_get_template_size(vha); + if (rval == QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0xb04a, + "MiniDump Template size obtained (%d KB)\n", + ha->md_template_size / 1024); + + /* Get Minidump template */ + rval = qla82xx_md_get_template(vha); + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, vha, 0xb04b, + "MiniDump Template obtained\n"); + + /* Allocate memory for minidump */ + rval = qla82xx_md_alloc(vha); + if (rval == QLA_SUCCESS) + ql_log(ql_log_info, vha, 0xb04c, + "MiniDump memory allocated (%d KB)\n", + ha->md_dump_size / 1024); + else { + ql_log(ql_log_info, vha, 0xb04d, + "Free MiniDump template: %p, size: (%d KB)\n", + ha->md_tmplt_hdr, + ha->md_template_size / 1024); + 
dma_free_coherent(&ha->pdev->dev, + ha->md_template_size, + ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); + ha->md_tmplt_hdr = 0; + } + + } + } +} + +int +qla82xx_beacon_on(struct scsi_qla_host *vha) +{ + + int rval; + struct qla_hw_data *ha = vha->hw; + qla82xx_idc_lock(ha); + rval = qla82xx_mbx_beacon_ctl(vha, 1); + + if (rval) { + ql_log(ql_log_warn, vha, 0xb050, + "mbx set led config failed in %s\n", __func__); + goto exit; + } + ha->beacon_blink_led = 1; +exit: + qla82xx_idc_unlock(ha); + return rval; +} + +int +qla82xx_beacon_off(struct scsi_qla_host *vha) +{ + + int rval; + struct qla_hw_data *ha = vha->hw; + qla82xx_idc_lock(ha); + rval = qla82xx_mbx_beacon_ctl(vha, 0); + + if (rval) { + ql_log(ql_log_warn, vha, 0xb051, + "mbx set led config failed in %s\n", __func__); + goto exit; + } + ha->beacon_blink_led = 0; +exit: + qla82xx_idc_unlock(ha); + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 8a21832c6693..57820c199bc2 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -484,8 +484,6 @@ #define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) #define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) #define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) - -#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) #define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) #define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 @@ -890,6 +888,7 @@ struct ct6_dsd { }; #define MBC_TOGGLE_INTERRUPT 0x10 +#define MBC_SET_LED_CONFIG 0x125 /* Flash offset */ #define FLT_REG_BOOTLOAD_82XX 0x72 @@ -922,4 +921,256 @@ struct ct6_dsd { #define M25P_INSTR_DP 0xb9 #define M25P_INSTR_RES 0xab +/* Minidump related */ + +/* + * Version of the template + * 4 Bytes + * X.Major.Minor.RELEASE + */ +#define QLA82XX_MINIDUMP_VERSION 0x10101 + +/* + * Entry Type Defines + */ +#define QLA82XX_RDNOP 0 +#define QLA82XX_RDCRB 1 +#define QLA82XX_RDMUX 2 +#define QLA82XX_QUEUE 3 +#define QLA82XX_BOARD 4 +#define QLA82XX_RDSRE 5 +#define QLA82XX_RDOCM 6 +#define QLA82XX_CACHE 10 +#define QLA82XX_L1DAT 11 +#define QLA82XX_L1INS 12 +#define QLA82XX_L2DTG 21 +#define QLA82XX_L2ITG 22 +#define QLA82XX_L2DAT 23 +#define QLA82XX_L2INS 24 +#define QLA82XX_RDROM 71 +#define QLA82XX_RDMEM 72 +#define QLA82XX_CNTRL 98 +#define QLA82XX_TLHDR 99 +#define QLA82XX_RDEND 255 + +/* + * Opcodes for Control Entries. + * These Flags are bit fields. + */ +#define QLA82XX_DBG_OPCODE_WR 0x01 +#define QLA82XX_DBG_OPCODE_RW 0x02 +#define QLA82XX_DBG_OPCODE_AND 0x04 +#define QLA82XX_DBG_OPCODE_OR 0x08 +#define QLA82XX_DBG_OPCODE_POLL 0x10 +#define QLA82XX_DBG_OPCODE_RDSTATE 0x20 +#define QLA82XX_DBG_OPCODE_WRSTATE 0x40 +#define QLA82XX_DBG_OPCODE_MDSTATE 0x80 + +/* + * Template Header and Entry Header definitions start here. + */ + +/* + * Template Header + * Parts of the template header can be modified by the driver. 
+ * These include the saved_state_array, capture_debug_level, driver_timestamp + */ + +#define QLA82XX_DBG_STATE_ARRAY_LEN 16 +#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA82XX_DBG_RSVD_ARRAY_LEN 8 + +/* + * Driver Flags + */ +#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ +#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */ + +struct qla82xx_md_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t template_checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info[3]; + + uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN]; + + /* markers_array used to capture some special locations on board */ + uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN]; + uint32_t num_of_free_entries; /* For internal use */ + uint32_t free_entry_offset; /* For internal use */ + uint32_t total_table_size; /* For internal use */ + uint32_t bkup_table_offset; /* For internal use */ +} __packed; + +/* + * Entry Header: Common to All Entry Types + */ + +/* + * Driver Code is for driver to write some info about the entry. + * Currently not used. + */ +typedef struct qla82xx_md_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +} __packed qla82xx_md_entry_hdr_t; + +/* + * Read CRB entry header + */ +struct qla82xx_md_entry_crb { + qla82xx_md_entry_hdr_t h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +} __packed; + +/* + * Cache entry header + */ +struct qla82xx_md_entry_cache { + qla82xx_md_entry_hdr_t h; + + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + + uint32_t data_size; + uint32_t op_count; + + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +} __packed; + +/* + * Read OCM + */ +struct qla82xx_md_entry_rdocm { + qla82xx_md_entry_hdr_t h; + + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; + uint32_t read_addr_cntrl; +} __packed; + +/* + * Read Memory + */ +struct qla82xx_md_entry_rdmem { + qla82xx_md_entry_hdr_t h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* + * Read ROM + */ +struct qla82xx_md_entry_rdrom { + qla82xx_md_entry_hdr_t h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +struct qla82xx_md_entry_mux { + qla82xx_md_entry_hdr_t h; + + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +} __packed; + +struct qla82xx_md_entry_queue { + qla82xx_md_entry_hdr_t h; + + uint32_t select_addr; + struct { + uint16_t 
queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+} __packed;
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE 0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
+ 0x410000B8, 0x410000BC };
 #endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1e69527f1e4e..fd14c7bfc626 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -143,7 +143,7 @@ MODULE_PARM_DESC(ql2xmultique_tag,
 "Set it to 1 to turn on the cpu affinity.");
 int ql2xfwloadbin;
-module_param(ql2xfwloadbin, int, S_IRUGO);
+module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xfwloadbin,
 "Option to specify location from which to load ISP firmware:.\n"
 " 2 -- load firmware via the request_firmware() (hotplug).\n"
@@ -158,11 +158,11 @@ MODULE_PARM_DESC(ql2xetsenable,
 "Default is 0 - skip ETS enablement.");
 int ql2xdbwr = 1;
-module_param(ql2xdbwr, int, S_IRUGO);
+module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xdbwr,
- "Option to specify scheme for request queue posting.\n"
- " 0 -- Regular doorbell.\n"
- " 1 -- CAMRAM doorbell (faster).\n");
+ "Option to specify scheme for request queue posting.\n"
+ " 0 -- Regular doorbell.\n"
+ " 1 -- CAMRAM doorbell (faster).\n");
 int ql2xtargetreset = 1;
 module_param(ql2xtargetreset, int, S_IRUGO);
@@ -183,11 +183,11 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
 int ql2xdontresethba;
-module_param(ql2xdontresethba, int, S_IRUGO);
+module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xdontresethba,
- "Option to specify reset behaviour.\n"
- " 0 (Default) -- Reset on failure.\n"
- " 1 -- Do not reset on failure.\n");
+ "Option to specify reset behaviour.\n"
+ " 0 (Default) -- Reset on failure.\n"
+ " 1 -- Do not reset on failure.\n");
 uint ql2xmaxlun = MAX_LUNS;
 module_param(ql2xmaxlun, uint, S_IRUGO);
@@ -195,6 +195,19 @@ MODULE_PARM_DESC(ql2xmaxlun,
 "Defines the maximum LU number to register with the SCSI "
 "midlayer. Default is 65535.");
+int ql2xmdcapmask = 0x1F;
+module_param(ql2xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdcapmask,
+ "Set the Minidump driver capture mask level. "
+ "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+
+int ql2xmdenable;
+module_param(ql2xmdenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xmdenable,
+ "Enable/disable MiniDump. "
+ "0 (Default) - MiniDump disabled. "
+ "1 - MiniDump enabled.");
+
 /*
  * SCSI host template entry points
  */
@@ -1750,9 +1763,9 @@ static struct isp_operations qla82xx_isp_ops = {
 .read_nvram = qla24xx_read_nvram_data,
 .write_nvram = qla24xx_write_nvram_data,
 .fw_dump = qla24xx_fw_dump,
- .beacon_on = qla24xx_beacon_on,
- .beacon_off = qla24xx_beacon_off,
- .beacon_blink = qla24xx_beacon_blink,
+ .beacon_on = qla82xx_beacon_on,
+ .beacon_off = qla82xx_beacon_off,
+ .beacon_blink = NULL,
 .read_optrom = qla82xx_read_optrom_data,
 .write_optrom = qla82xx_write_optrom_data,
 .get_flash_version = qla24xx_get_flash_version,
@@ -2670,6 +2683,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 qla2x00_mem_free(ha);
+ qla82xx_md_free(vha);
+
 qla2x00_free_queues(ha);
 }
@@ -3903,8 +3918,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
 /* Check if beacon LED needs to be blinked for physical host only */
 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
- set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
- start_dpc++;
+ /* There is no beacon_blink function for ISP82xx */
+ if (!IS_QLA82XX(ha)) {
+ set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
+ start_dpc++;
+ }
 }
 /* Process any deferred work. */