Diffstat (limited to 'drivers/scsi'): 27 files changed, 815 insertions, 582 deletions
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 5117d90ccd9e..3242ff63986f 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1506,10 +1506,8 @@ NCR_700_intr(int irq, void *dev_id) __u8 sstat0 = 0, dstat = 0; __u32 dsp; struct scsi_cmnd *SCp = hostdata->cmd; - enum NCR_700_Host_State state; handled = 1; - state = hostdata->state; SCp = hostdata->cmd; if(istat & SCSI_INT_PENDING) { @@ -1760,7 +1758,6 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; __u32 move_ins; - enum dma_data_direction direction; struct NCR_700_command_slot *slot; if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) { @@ -1877,7 +1874,6 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) } /* now build the scatter gather list */ - direction = SCp->sc_data_direction; if(move_ins != 0) { int i; int sg_count; diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 9220bcf8388f..5d054d5c70a5 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -49,7 +49,7 @@ struct device_attribute; #define ARCMSR_MAX_OUTSTANDING_CMD 1024 #define ARCMSR_DEFAULT_OUTSTANDING_CMD 128 #define ARCMSR_MIN_OUTSTANDING_CMD 32 -#define ARCMSR_DRIVER_VERSION "v1.40.00.10-20190116" +#define ARCMSR_DRIVER_VERSION "v1.50.00.02-20200819" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -80,6 +80,7 @@ struct device_attribute; #ifndef PCI_DEVICE_ID_ARECA_1884 #define PCI_DEVICE_ID_ARECA_1884 0x1884 #endif +#define PCI_DEVICE_ID_ARECA_1886 0x188A #define ARCMSR_HOURS (1000 * 60 * 60 * 4) #define ARCMSR_MINUTES (1000 * 60 * 60) /* @@ -436,6 +437,21 @@ struct FIRMWARE_INFO #define ARCMSR_HBEMU_DOORBELL_SYNC 0x100 #define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004 #define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080 + +/* +******************************************************************************* +** SPEC. 
for Areca Type F adapter +******************************************************************************* +*/ +#define ARCMSR_SIGNATURE_1886 0x188617D3 +// Doorbell and interrupt definition are same as Type E adapter +/* ARC-1886 doorbell sync */ +#define ARCMSR_HBFMU_DOORBELL_SYNC 0x100 +//set host rw buffer physical address at inbound message 0, 1 (low,high) +#define ARCMSR_HBFMU_DOORBELL_SYNC1 0x300 +#define ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK 0x80000000 +#define ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE 0x20000000 + /* ******************************************************************************* ** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504) @@ -720,6 +736,80 @@ struct MessageUnit_E{ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/ }; +/* +********************************************************************* +** Messaging Unit (MU) of Type F processor(LSI) +********************************************************************* +*/ +struct MessageUnit_F { + uint32_t iobound_doorbell; /*0000 0003*/ + uint32_t write_sequence_3xxx; /*0004 0007*/ + uint32_t host_diagnostic_3xxx; /*0008 000B*/ + uint32_t posted_outbound_doorbell; /*000C 000F*/ + uint32_t master_error_attribute; /*0010 0013*/ + uint32_t master_error_address_low; /*0014 0017*/ + uint32_t master_error_address_high; /*0018 001B*/ + uint32_t hcb_size; /*001C 001F*/ + uint32_t inbound_doorbell; /*0020 0023*/ + uint32_t diagnostic_rw_data; /*0024 0027*/ + uint32_t diagnostic_rw_address_low; /*0028 002B*/ + uint32_t diagnostic_rw_address_high; /*002C 002F*/ + uint32_t host_int_status; /*0030 0033*/ + uint32_t host_int_mask; /*0034 0037*/ + uint32_t dcr_data; /*0038 003B*/ + uint32_t dcr_address; /*003C 003F*/ + uint32_t inbound_queueport; /*0040 0043*/ + uint32_t outbound_queueport; /*0044 0047*/ + uint32_t hcb_pci_address_low; /*0048 004B*/ + uint32_t hcb_pci_address_high; /*004C 004F*/ + uint32_t iop_int_status; /*0050 0053*/ + uint32_t iop_int_mask; /*0054 0057*/ + uint32_t iop_inbound_queue_port; /*0058 005B*/ + uint32_t iop_outbound_queue_port; /*005C 005F*/ + uint32_t inbound_free_list_index; /*0060 0063*/ + uint32_t inbound_post_list_index; /*0064 0067*/ + uint32_t reply_post_producer_index; /*0068 006B*/ + uint32_t reply_post_consumer_index; /*006C 006F*/ + uint32_t inbound_doorbell_clear; /*0070 0073*/ + uint32_t i2o_message_unit_control; /*0074 0077*/ + uint32_t last_used_message_source_address_low; /*0078 007B*/ + uint32_t last_used_message_source_address_high; /*007C 007F*/ + uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/ + uint32_t message_dest_address_index; /*0090 0093*/ + uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/ + uint32_t utility_A_int_counter_timer; /*0098 009B*/ + uint32_t outbound_doorbell; /*009C 009F*/ + uint32_t outbound_doorbell_clear; /*00A0 00A3*/ + uint32_t message_source_address_index; /*00A4 00A7*/ + uint32_t message_done_queue_index; /*00A8 00AB*/ + uint32_t reserved0; /*00AC 00AF*/ + uint32_t inbound_msgaddr0; /*00B0 00B3*/ + uint32_t inbound_msgaddr1; /*00B4 00B7*/ + uint32_t outbound_msgaddr0; /*00B8 00BB*/ + uint32_t outbound_msgaddr1; /*00BC 00BF*/ + uint32_t inbound_queueport_low; /*00C0 00C3*/ + uint32_t inbound_queueport_high; /*00C4 00C7*/ + uint32_t outbound_queueport_low; /*00C8 00CB*/ + uint32_t outbound_queueport_high; /*00CC 00CF*/ + uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/ + uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/ + uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/ + uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/ + uint32_t 
message_dest_queue_port_low; /*00E0 00E3*/ + uint32_t message_dest_queue_port_high; /*00E4 00E7*/ + uint32_t last_used_message_dest_address_low; /*00E8 00EB*/ + uint32_t last_used_message_dest_address_high; /*00EC 00EF*/ + uint32_t message_done_queue_base_address_low; /*00F0 00F3*/ + uint32_t message_done_queue_base_address_high; /*00F4 00F7*/ + uint32_t host_diagnostic; /*00F8 00FB*/ + uint32_t write_sequence; /*00FC 00FF*/ + uint32_t reserved1[46]; /*0100 01B7*/ + uint32_t reply_post_producer_index1; /*01B8 01BB*/ + uint32_t reply_post_consumer_index1; /*01BC 01BF*/ +}; + +#define MESG_RW_BUFFER_SIZE (256 * 3) + typedef struct deliver_completeQ { uint16_t cmdFlag; uint16_t cmdSMID; @@ -739,6 +829,7 @@ struct AdapterControlBlock #define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */ #define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */ #define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */ +#define ACB_ADAPTER_TYPE_F 0x00000005 /* hba L IOP */ u32 ioqueue_size; struct pci_dev * pdev; struct Scsi_Host * host; @@ -760,10 +851,16 @@ struct AdapterControlBlock struct MessageUnit_C __iomem *pmuC; struct MessageUnit_D *pmuD; struct MessageUnit_E __iomem *pmuE; + struct MessageUnit_F __iomem *pmuF; }; /* message unit ATU inbound base address0 */ void __iomem *mem_base0; void __iomem *mem_base1; + //0x000 - COMPORT_IN (Host sent to ROC) + uint32_t *message_wbuffer; + //0x100 - COMPORT_OUT (ROC sent to Host) + uint32_t *message_rbuffer; + uint32_t *msgcode_rwbuffer; //0x200 - BIOS_AREA uint32_t acb_flags; u16 dev_id; uint8_t adapter_index; @@ -836,8 +933,6 @@ struct AdapterControlBlock #define FW_NORMAL 0x0000 #define FW_BOG 0x0001 #define FW_DEADLOCK 0x0010 - atomic_t rq_map_token; - atomic_t ante_token_value; uint32_t maxOutstanding; int vector_count; uint32_t maxFreeCCB; @@ -848,6 +943,7 @@ struct AdapterControlBlock uint32_t out_doorbell; uint32_t completionQ_entry; pCompletion_Q pCompletionQ; + uint32_t completeQ_size; };/* HW_DEVICE_EXTENSION */ /* ******************************************************************************* diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index ec895d0319f0..e4fdb473b990 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -133,6 +133,7 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB); static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb); static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb); static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb); +static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb); static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); @@ -209,6 +210,8 @@ static struct pci_device_id arcmsr_device_id_table[] = { .driver_data = ACB_ADAPTER_TYPE_C}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884), .driver_data = ACB_ADAPTER_TYPE_E}, + {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886), + .driver_data = ACB_ADAPTER_TYPE_F}, {0, 0}, /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); @@ -232,12 +235,12 @@ static void arcmsr_free_io_queue(struct AdapterControlBlock *acb) switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: case ACB_ADAPTER_TYPE_D: - case ACB_ADAPTER_TYPE_E: { + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size, acb->dma_coherent2, 
acb->dma_coherent_handle2); break; } - } } static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) @@ -310,6 +313,19 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) acb->out_doorbell = 0; break; } + case ACB_ADAPTER_TYPE_F: { + acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (!acb->pmuF) { + pr_notice("arcmsr%d: memory mapping region fail\n", + acb->host->host_no); + return false; + } + writel(0, &acb->pmuF->host_int_status); /* clear interrupt */ + writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); + acb->in_doorbell = 0; + acb->out_doorbell = 0; + break; + } } return true; } @@ -317,26 +333,25 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A:{ + case ACB_ADAPTER_TYPE_A: iounmap(acb->pmuA); - } - break; - case ACB_ADAPTER_TYPE_B:{ + break; + case ACB_ADAPTER_TYPE_B: iounmap(acb->mem_base0); iounmap(acb->mem_base1); - } - - break; - case ACB_ADAPTER_TYPE_C:{ + break; + case ACB_ADAPTER_TYPE_C: iounmap(acb->pmuC); - } - break; + break; case ACB_ADAPTER_TYPE_D: iounmap(acb->mem_base0); break; case ACB_ADAPTER_TYPE_E: iounmap(acb->pmuE); break; + case ACB_ADAPTER_TYPE_F: + iounmap(acb->pmuF); + break; } } @@ -552,23 +567,20 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A: { + case ACB_ADAPTER_TYPE_A: arcmsr_hbaA_flush_cache(acb); - } break; - - case ACB_ADAPTER_TYPE_B: { + case ACB_ADAPTER_TYPE_B: arcmsr_hbaB_flush_cache(acb); - } break; - case ACB_ADAPTER_TYPE_C: { + case ACB_ADAPTER_TYPE_C: arcmsr_hbaC_flush_cache(acb); - } break; case ACB_ADAPTER_TYPE_D: arcmsr_hbaD_flush_cache(acb); break; case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: arcmsr_hbaE_flush_cache(acb); break; } @@ -626,6 +638,27 @@ static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb) reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); } +static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock *acb) +{ + dma_addr_t host_buffer_dma; + struct MessageUnit_F __iomem *pmuF; + + memset(acb->dma_coherent2, 0xff, acb->completeQ_size); + acb->message_wbuffer = (uint32_t *)round_up((unsigned long)acb->dma_coherent2 + + acb->completeQ_size, 4); + acb->message_rbuffer = ((void *)acb->message_wbuffer) + 0x100; + acb->msgcode_rwbuffer = ((void *)acb->message_wbuffer) + 0x200; + memset((void *)acb->message_wbuffer, 0, MESG_RW_BUFFER_SIZE); + host_buffer_dma = round_up(acb->dma_coherent_handle2 + acb->completeQ_size, 4); + pmuF = acb->pmuF; + /* host buffer low address, bit0:1 all buffer active */ + writel(lower_32_bits(host_buffer_dma | 1), &pmuF->inbound_msgaddr0); + /* host buffer high address */ + writel(upper_32_bits(host_buffer_dma), &pmuF->inbound_msgaddr1); + /* set host buffer physical address */ + writel(ARCMSR_HBFMU_DOORBELL_SYNC1, &pmuF->iobound_doorbell); +} + static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) { bool rtn = true; @@ -679,6 +712,28 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) acb->doneq_index = 0; } break; + case ACB_ADAPTER_TYPE_F: { + uint32_t QueueDepth; + uint32_t depthTbl[] = {256, 512, 1024, 128, 64, 32}; + + arcmsr_wait_firmware_ready(acb); + QueueDepth = depthTbl[readl(&acb->pmuF->outbound_msgaddr1) & 7]; + acb->completeQ_size = sizeof(struct deliver_completeQ) * QueueDepth + 128; + acb->ioqueue_size = 
roundup(acb->completeQ_size + MESG_RW_BUFFER_SIZE, 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); + if (!dma_coherent) { + pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); + return false; + } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = dma_coherent; + acb->pCompletionQ = dma_coherent; + acb->completionQ_entry = acb->completeQ_size / sizeof(struct deliver_completeQ); + acb->doneq_index = 0; + arcmsr_hbaF_assign_regAddr(acb); + } + break; default: break; } @@ -713,7 +768,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) acb->host->sg_tablesize = max_sg_entrys; roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB; - acb->uncache_size += acb->ioqueue_size; + if (acb->adapter_type != ACB_ADAPTER_TYPE_F) + acb->uncache_size += acb->ioqueue_size; dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); if(!dma_coherent){ printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); @@ -736,6 +792,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_C: case ACB_ADAPTER_TYPE_D: case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: ccb_tmp->cdb_phyaddr = cdb_phyaddr; break; } @@ -754,8 +811,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize); dma_coherent_handle = next_ccb_phy; } - acb->dma_coherent_handle2 = dma_coherent_handle; - acb->dma_coherent2 = ccb_tmp; + if (acb->adapter_type != ACB_ADAPTER_TYPE_F) { + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = ccb_tmp; + } switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2; @@ -785,7 +844,6 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work) struct scsi_device *psdev; char diff, temp; - acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -822,8 +880,12 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work) devicemap = (char __iomem *)(®->msgcode_rwbuffer[21]); break; } + case ACB_ADAPTER_TYPE_F: { + signature = (uint32_t __iomem *)(&acb->msgcode_rwbuffer[0]); + devicemap = (char __iomem *)(&acb->msgcode_rwbuffer[21]); + break; + } } - atomic_inc(&acb->rq_map_token); if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG) return; for (target = 0; target < ARCMSR_MAX_TARGETID - 1; @@ -854,6 +916,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work) devicemap++; acb_dev_map++; } + acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG; } static int @@ -906,8 +969,6 @@ out_free_irq: static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb) { INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); - atomic_set(&pacb->rq_map_token, 16); - atomic_set(&pacb->ante_token_value, 16); pacb->fw_flag = FW_NORMAL; timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0); pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ); @@ -1009,7 +1070,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) if(!error){ goto free_hbb_mu; } - arcmsr_free_io_queue(acb); + if (acb->adapter_type != ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); error = 
arcmsr_alloc_ccb_pool(acb); if(error){ goto unmap_pci_region; @@ -1122,6 +1184,14 @@ static int arcmsr_resume(struct pci_dev *pdev) acb->out_doorbell = 0; acb->doneq_index = 0; break; + case ACB_ADAPTER_TYPE_F: + writel(0, &acb->pmuF->host_int_status); + writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell); + acb->in_doorbell = 0; + acb->out_doorbell = 0; + acb->doneq_index = 0; + arcmsr_hbaF_assign_regAddr(acb); + break; } arcmsr_iop_init(acb); arcmsr_init_get_devmap_timer(acb); @@ -1134,6 +1204,8 @@ controller_stop: controller_unregister: scsi_remove_host(host); arcmsr_free_ccb_pool(acb); + if (acb->adapter_type == ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); @@ -1213,25 +1285,20 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { uint8_t rtnval = 0; switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A: { + case ACB_ADAPTER_TYPE_A: rtnval = arcmsr_hbaA_abort_allcmd(acb); - } break; - - case ACB_ADAPTER_TYPE_B: { + case ACB_ADAPTER_TYPE_B: rtnval = arcmsr_hbaB_abort_allcmd(acb); - } break; - - case ACB_ADAPTER_TYPE_C: { + case ACB_ADAPTER_TYPE_C: rtnval = arcmsr_hbaC_abort_allcmd(acb); - } break; - case ACB_ADAPTER_TYPE_D: rtnval = arcmsr_hbaD_abort_allcmd(acb); break; case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: rtnval = arcmsr_hbaE_abort_allcmd(acb); break; } @@ -1307,7 +1374,8 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable); } break; - case ACB_ADAPTER_TYPE_E: { + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; orig_mask = readl(®->host_int_mask); writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, ®->host_int_mask); @@ -1514,6 +1582,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_E: arcmsr_hbaE_postqueue_isr(acb); break; + case ACB_ADAPTER_TYPE_F: + arcmsr_hbaF_postqueue_isr(acb); + break; } } @@ -1568,6 +1639,8 @@ static void arcmsr_free_pcidev(struct AdapterControlBlock *acb) pdev = acb->pdev; arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); + if (acb->adapter_type == ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); @@ -1625,6 +1698,8 @@ static void arcmsr_remove(struct pci_dev *pdev) } arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); + if (acb->adapter_type == ACB_ADAPTER_TYPE_F) + arcmsr_free_io_queue(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); @@ -1702,7 +1777,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, writel(intmask_org | mask, reg->pcief0_int_enable); break; } - case ACB_ADAPTER_TYPE_E: { + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR); @@ -1846,6 +1922,19 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr writel(ccb_post_stamp, &pmu->inbound_queueport_low); break; } + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *pmu = acb->pmuF; + u32 ccb_post_stamp, arc_cdb_size; + + if (ccb->arc_cdb_size <= 0x300) + arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1; + else + arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1; + ccb_post_stamp = (ccb->smid | arc_cdb_size); + writel(0, 
&pmu->inbound_queueport_high); + writel(ccb_post_stamp, &pmu->inbound_queueport_low); + break; + } } } @@ -1916,23 +2005,20 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB) static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A: { + case ACB_ADAPTER_TYPE_A: arcmsr_hbaA_stop_bgrb(acb); - } break; - - case ACB_ADAPTER_TYPE_B: { + case ACB_ADAPTER_TYPE_B: arcmsr_hbaB_stop_bgrb(acb); - } break; - case ACB_ADAPTER_TYPE_C: { + case ACB_ADAPTER_TYPE_C: arcmsr_hbaC_stop_bgrb(acb); - } break; case ACB_ADAPTER_TYPE_D: arcmsr_hbaD_stop_bgrb(acb); break; case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: arcmsr_hbaE_stop_bgrb(acb); break; } @@ -1951,7 +2037,6 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); } break; - case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); @@ -1969,7 +2054,8 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb) reg->inbound_doorbell); } break; - case ACB_ADAPTER_TYPE_E: { + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK; writel(acb->out_doorbell, ®->iobound_doorbell); @@ -2015,7 +2101,8 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) reg->inbound_doorbell); } break; - case ACB_ADAPTER_TYPE_E: { + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK; writel(acb->out_doorbell, ®->iobound_doorbell); @@ -2034,7 +2121,6 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; } break; - case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer; @@ -2055,6 +2141,10 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) qbuffer = (struct QBUFFER __iomem *)®->message_rbuffer; } break; + case ACB_ADAPTER_TYPE_F: { + qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer; + } + break; } return qbuffer; } @@ -2069,7 +2159,6 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc pqbuffer = (struct QBUFFER __iomem *) ®->message_wbuffer; } break; - case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer; @@ -2090,6 +2179,9 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc pqbuffer = (struct QBUFFER __iomem *)®->message_wbuffer; } break; + case ACB_ADAPTER_TYPE_F: + pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer; + break; } return pqbuffer; } @@ -2504,6 +2596,36 @@ static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb) spin_unlock_irqrestore(&acb->doneq_lock, flags); } +static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb) +{ + uint32_t doneq_index; + uint16_t cmdSMID; + int error; + struct MessageUnit_F __iomem *phbcmu; + struct CommandControlBlock *ccb; + unsigned long flags; + + spin_lock_irqsave(&acb->doneq_lock, flags); + doneq_index = acb->doneq_index; + phbcmu = acb->pmuF; + while (1) { + cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID; + if (cmdSMID == 0xffff) + break; + ccb = acb->pccb_pool[cmdSMID]; + error = 
(acb->pCompletionQ[doneq_index].cmdFlag & + ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; + arcmsr_drain_donequeue(acb, ccb, error); + acb->pCompletionQ[doneq_index].cmdSMID = 0xffff; + doneq_index++; + if (doneq_index >= acb->completionQ_entry) + doneq_index = 0; + } + acb->doneq_index = doneq_index; + writel(doneq_index, &phbcmu->reply_post_consumer_index); + spin_unlock_irqrestore(&acb->doneq_lock, flags); +} + /* ********************************************************************************** ** Handle a message interrupt @@ -2694,21 +2816,46 @@ static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB) return IRQ_HANDLED; } +static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB) +{ + uint32_t host_interrupt_status; + struct MessageUnit_F __iomem *phbcmu = pACB->pmuF; + + host_interrupt_status = readl(&phbcmu->host_int_status) & + (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR); + if (!host_interrupt_status) + return IRQ_NONE; + do { + /* MU post queue interrupts*/ + if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) + arcmsr_hbaF_postqueue_isr(pACB); + + /* MU ioctl transfer doorbell interrupts*/ + if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) + arcmsr_hbaE_doorbell_isr(pACB); + + host_interrupt_status = readl(&phbcmu->host_int_status); + } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR | + ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)); + return IRQ_HANDLED; +} + static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: return arcmsr_hbaA_handle_isr(acb); - break; case ACB_ADAPTER_TYPE_B: return arcmsr_hbaB_handle_isr(acb); - break; case ACB_ADAPTER_TYPE_C: return arcmsr_hbaC_handle_isr(acb); case ACB_ADAPTER_TYPE_D: return arcmsr_hbaD_handle_isr(acb); case ACB_ADAPTER_TYPE_E: return arcmsr_hbaE_handle_isr(acb); + case ACB_ADAPTER_TYPE_F: + return arcmsr_hbaF_handle_isr(acb); default: return IRQ_NONE; } @@ -3257,6 +3404,31 @@ static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB) return true; } +static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB) +{ + struct MessageUnit_F __iomem *reg = pACB->pmuF; + uint32_t intmask_org; + + /* disable all outbound interrupt */ + intmask_org = readl(®->host_int_mask); /* disable outbound message0 int */ + writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, ®->host_int_mask); + /* wait firmware ready */ + arcmsr_wait_firmware_ready(pACB); + /* post "get config" instruction */ + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + + pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pACB->out_doorbell, ®->iobound_doorbell); + /* wait message ready */ + if (!arcmsr_hbaE_wait_msgint_ready(pACB)) { + pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n", + pACB->host->host_no); + return false; + } + arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer); + return true; +} + static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) { bool rtn = false; @@ -3277,6 +3449,9 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_E: rtn = arcmsr_hbaE_get_config(acb); break; + case ACB_ADAPTER_TYPE_F: + rtn = arcmsr_hbaF_get_config(acb); + break; default: break; } @@ -3634,23 +3809,20 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, int rtn = 0; switch (acb->adapter_type) { - case ACB_ADAPTER_TYPE_A: { + case 
ACB_ADAPTER_TYPE_A: rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb); - } break; - - case ACB_ADAPTER_TYPE_B: { + case ACB_ADAPTER_TYPE_B: rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb); - } break; - case ACB_ADAPTER_TYPE_C: { + case ACB_ADAPTER_TYPE_C: rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb); - } break; case ACB_ADAPTER_TYPE_D: rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb); break; case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb); break; } @@ -3731,6 +3903,16 @@ static void arcmsr_set_iop_datetime(struct timer_list *t) writel(pacb->out_doorbell, ®->iobound_doorbell); break; } + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *reg = pacb->pmuF; + + pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0]; + pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1]; + writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, ®->inbound_msgaddr0); + pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(pacb->out_doorbell, ®->iobound_doorbell); + break; + } } if (sys_tz.tz_minuteswest) next_time = ARCMSR_HOURS; @@ -3756,6 +3938,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) dma_coherent_handle = acb->dma_coherent_handle2; break; case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: dma_coherent_handle = acb->dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); break; @@ -3873,11 +4056,8 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) writel(cdb_phyaddr, ®->msgcode_rwbuffer[2]); writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[3]); writel(acb->ccbsize, ®->msgcode_rwbuffer[4]); - dma_coherent_handle = acb->dma_coherent_handle2; - cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff); - cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16); - writel(cdb_phyaddr, ®->msgcode_rwbuffer[5]); - writel(cdb_phyaddr_hi32, ®->msgcode_rwbuffer[6]); + writel(lower_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[5]); + writel(upper_32_bits(acb->dma_coherent_handle2), ®->msgcode_rwbuffer[6]); writel(acb->ioqueue_size, ®->msgcode_rwbuffer[7]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; @@ -3889,6 +4069,27 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) } } break; + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *reg = acb->pmuF; + + acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG; + acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886; + acb->msgcode_rwbuffer[2] = cdb_phyaddr; + acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32; + acb->msgcode_rwbuffer[4] = acb->ccbsize; + acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2); + acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2); + acb->msgcode_rwbuffer[7] = acb->completeQ_size; + writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, ®->inbound_msgaddr0); + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(acb->out_doorbell, ®->iobound_doorbell); + if (!arcmsr_hbaE_wait_msgint_ready(acb)) { + pr_notice("arcmsr%d: 'set command Q window' timeout\n", + acb->host->host_no); + return 1; + } + } + break; } return 0; } @@ -3937,7 +4138,8 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0); } break; - case ACB_ADAPTER_TYPE_E: { + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; do { if (!(acb->acb_flags & ACB_F_IOP_INITED)) @@ -3952,24 +4154,10 @@ static void 
arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) static void arcmsr_request_device_map(struct timer_list *t) { struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer); - if (unlikely(atomic_read(&acb->rq_map_token) == 0) || - (acb->acb_flags & ACB_F_BUS_RESET) || - (acb->acb_flags & ACB_F_ABORT)) { - mod_timer(&acb->eternal_timer, - jiffies + msecs_to_jiffies(6 * HZ)); + if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) { + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } else { acb->fw_flag = FW_NORMAL; - if (atomic_read(&acb->ante_token_value) == - atomic_read(&acb->rq_map_token)) { - atomic_set(&acb->rq_map_token, 16); - } - atomic_set(&acb->ante_token_value, - atomic_read(&acb->rq_map_token)); - if (atomic_dec_and_test(&acb->rq_map_token)) { - mod_timer(&acb->eternal_timer, jiffies + - msecs_to_jiffies(6 * HZ)); - return; - } switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -3999,10 +4187,23 @@ static void arcmsr_request_device_map(struct timer_list *t) writel(acb->out_doorbell, ®->iobound_doorbell); break; } + case ACB_ADAPTER_TYPE_F: { + struct MessageUnit_F __iomem *reg = acb->pmuF; + uint32_t outMsg1 = readl(®->outbound_msgaddr1); + + if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) || + (outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE)) + goto nxt6s; + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; + writel(acb->out_doorbell, ®->iobound_doorbell); + break; + } default: return; } acb->acb_flags |= ACB_F_MSG_GET_CONFIG; +nxt6s: mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } } @@ -4084,6 +4285,7 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) arcmsr_hbaD_start_bgrb(acb); break; case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: arcmsr_hbaE_start_bgrb(acb); break; } @@ -4163,7 +4365,8 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) } } break; - case ACB_ADAPTER_TYPE_E: { + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F: { struct MessageUnit_E __iomem *reg = acb->pmuE; uint32_t i, tmp; @@ -4290,7 +4493,8 @@ static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb) true : false; } break; - case ACB_ADAPTER_TYPE_E:{ + case ACB_ADAPTER_TYPE_E: + case ACB_ADAPTER_TYPE_F:{ struct MessageUnit_E __iomem *reg = acb->pmuE; rtn = (readl(®->host_diagnostic_3xxx) & ARCMSR_ARC188X_RESET_ADAPTER) ? 
true : false; @@ -4389,8 +4593,6 @@ wait_reset_done: goto wait_reset_done; } arcmsr_iop_init(acb); - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); @@ -4399,8 +4601,6 @@ wait_reset_done: pr_notice("arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; - atomic_set(&acb->rq_map_token, 16); - atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); @@ -4493,6 +4693,9 @@ static const char *arcmsr_info(struct Scsi_Host *host) case PCI_DEVICE_ID_ARECA_1884: type = "SAS/SATA"; break; + case PCI_DEVICE_ID_ARECA_1886: + type = "NVMe/SAS/SATA"; + break; default: type = "unknown"; raid6 = 0; diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index bc5d84f87d8f..440ef32be048 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -749,6 +749,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) if (bfad->pci_bar0_kva == NULL) { printk(KERN_ERR "Fail to map bar0\n"); + rc = -ENODEV; goto out_release_region; } diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 0c251a3b99b7..fa16894d8758 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -4721,30 +4721,7 @@ static struct pci_driver dc395x_driver = { .probe = dc395x_init_one, .remove = dc395x_remove_one, }; - - -/** - * dc395x_module_init - Module initialization function - * - * Used by both module and built-in driver to initialise this driver. - **/ -static int __init dc395x_module_init(void) -{ - return pci_register_driver(&dc395x_driver); -} - - -/** - * dc395x_module_exit - Module cleanup function. - **/ -static void __exit dc395x_module_exit(void) -{ - pci_unregister_driver(&dc395x_driver); -} - - -module_init(dc395x_module_init); -module_exit(dc395x_module_exit); +module_pci_driver(dc395x_driver); MODULE_AUTHOR("C.L. 
Huang / Erich Chen / Kurt Garloff"); MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series"); diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 2cb7a8c93a15..ffef2c8eddc6 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -1053,16 +1053,10 @@ EXPORT_SYMBOL_GPL(fcoe_fcf_device_add); int __init fcoe_sysfs_setup(void) { - int error; - atomic_set(&ctlr_num, 0); atomic_set(&fcf_num, 0); - error = bus_register(&fcoe_bus_type); - if (error) - return error; - - return 0; + return bus_register(&fcoe_bus_type); } void __exit fcoe_sysfs_teardown(void) diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c index 9eab7e7caf38..7b18635df7e6 100644 --- a/drivers/scsi/fnic/vnic_wq_copy.c +++ b/drivers/scsi/fnic/vnic_wq_copy.c @@ -79,8 +79,6 @@ int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) { - int err; - wq->index = index; wq->vdev = vdev; wq->to_use_index = wq->to_clean_index = 0; @@ -92,11 +90,7 @@ int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, vnic_wq_copy_disable(wq); - err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); - if (err) - return err; - - return 0; + return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); } void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index dc0e17729acf..5d801388680b 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -3168,81 +3168,6 @@ static inline void gdth_timer_init(void) } #endif -static void __init internal_setup(char *str,int *ints) -{ - int i; - char *cur_str, *argv; - - TRACE2(("internal_setup() str %s ints[0] %d\n", - str ? str:"NULL", ints ? ints[0]:0)); - - /* analyse string */ - argv = str; - while (argv && (cur_str = strchr(argv, ':'))) { - int val = 0, c = *++cur_str; - - if (c == 'n' || c == 'N') - val = 0; - else if (c == 'y' || c == 'Y') - val = 1; - else - val = (int)simple_strtoul(cur_str, NULL, 0); - - if (!strncmp(argv, "disable:", 8)) - disable = val; - else if (!strncmp(argv, "reserve_mode:", 13)) - reserve_mode = val; - else if (!strncmp(argv, "reverse_scan:", 13)) - reverse_scan = val; - else if (!strncmp(argv, "hdr_channel:", 12)) - hdr_channel = val; - else if (!strncmp(argv, "max_ids:", 8)) - max_ids = val; - else if (!strncmp(argv, "rescan:", 7)) - rescan = val; - else if (!strncmp(argv, "shared_access:", 14)) - shared_access = val; - else if (!strncmp(argv, "reserve_list:", 13)) { - reserve_list[0] = val; - for (i = 1; i < MAX_RES_ARGS; i++) { - cur_str = strchr(cur_str, ','); - if (!cur_str) - break; - if (!isdigit((int)*++cur_str)) { - --cur_str; - break; - } - reserve_list[i] = - (int)simple_strtoul(cur_str, NULL, 0); - } - if (!cur_str) - break; - argv = ++cur_str; - continue; - } - - if ((argv = strchr(argv, ','))) - ++argv; - } -} - -int __init option_setup(char *str) -{ - int ints[MAXHA]; - char *cur = str; - int i = 1; - - TRACE2(("option_setup() str %s\n", str ? 
str:"NULL")); - - while (cur && isdigit(*cur) && i < MAXHA) { - ints[i++] = simple_strtoul(cur, NULL, 0); - if ((cur = strchr(cur, ',')) != NULL) cur++; - } - - ints[0] = i - 1; - internal_setup(cur, ints); - return 1; -} static const char *gdth_ctr_name(gdth_ha_str *ha) { @@ -4317,5 +4242,81 @@ module_init(gdth_init); module_exit(gdth_exit); #ifndef MODULE +static void __init internal_setup(char *str,int *ints) +{ + int i; + char *cur_str, *argv; + + TRACE2(("internal_setup() str %s ints[0] %d\n", + str ? str:"NULL", ints ? ints[0]:0)); + + /* analyse string */ + argv = str; + while (argv && (cur_str = strchr(argv, ':'))) { + int val = 0, c = *++cur_str; + + if (c == 'n' || c == 'N') + val = 0; + else if (c == 'y' || c == 'Y') + val = 1; + else + val = (int)simple_strtoul(cur_str, NULL, 0); + + if (!strncmp(argv, "disable:", 8)) + disable = val; + else if (!strncmp(argv, "reserve_mode:", 13)) + reserve_mode = val; + else if (!strncmp(argv, "reverse_scan:", 13)) + reverse_scan = val; + else if (!strncmp(argv, "hdr_channel:", 12)) + hdr_channel = val; + else if (!strncmp(argv, "max_ids:", 8)) + max_ids = val; + else if (!strncmp(argv, "rescan:", 7)) + rescan = val; + else if (!strncmp(argv, "shared_access:", 14)) + shared_access = val; + else if (!strncmp(argv, "reserve_list:", 13)) { + reserve_list[0] = val; + for (i = 1; i < MAX_RES_ARGS; i++) { + cur_str = strchr(cur_str, ','); + if (!cur_str) + break; + if (!isdigit((int)*++cur_str)) { + --cur_str; + break; + } + reserve_list[i] = + (int)simple_strtoul(cur_str, NULL, 0); + } + if (!cur_str) + break; + argv = ++cur_str; + continue; + } + + if ((argv = strchr(argv, ','))) + ++argv; + } +} + +static int __init option_setup(char *str) +{ + int ints[MAXHA]; + char *cur = str; + int i = 1; + + TRACE2(("option_setup() str %s\n", str ? str:"NULL")); + + while (cur && isdigit(*cur) && i < MAXHA) { + ints[i++] = simple_strtoul(cur, NULL, 0); + if ((cur = strchr(cur, ',')) != NULL) cur++; + } + + ints[0] = i - 1; + internal_setup(cur, ints); + return 1; +} + __setup("gdth=", option_setup); #endif diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 1d39628ac947..ca16ef45d8dc 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -2962,20 +2962,8 @@ static struct pci_driver initio_pci_driver = { .probe = initio_probe_one, .remove = initio_remove_one, }; - -static int __init initio_init_driver(void) -{ - return pci_register_driver(&initio_pci_driver); -} - -static void __exit initio_exit_driver(void) -{ - pci_unregister_driver(&initio_pci_driver); -} +module_pci_driver(initio_pci_driver); MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver"); MODULE_AUTHOR("Initio Corporation"); MODULE_LICENSE("GPL"); - -module_init(initio_init_driver); -module_exit(initio_exit_driver); diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h index 721ab982d2ac..0ddfdda2b248 100644 --- a/drivers/scsi/isci/remote_node_table.h +++ b/drivers/scsi/isci/remote_node_table.h @@ -61,7 +61,7 @@ /** * * - * Remote node sets are sets of remote node index in the remtoe node table The + * Remote node sets are sets of remote node index in the remote node table. The * SCU hardware requires that STP remote node entries take three consecutive * remote node index so the table is arranged in sets of three. 
The bits are * used as 0111 0111 to make a byte and the bits define the set of three remote diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index cbf1e8b091b9..5fa0f4ed6565 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -1050,7 +1050,7 @@ static int myrb_get_hba_config(struct myrb_hba *cb) enquiry2->fw.turn_id = 0; } snprintf(cb->fw_version, sizeof(cb->fw_version), - "%d.%02d-%c-%02d", + "%u.%02u-%c-%02u", enquiry2->fw.major_version, enquiry2->fw.minor_version, enquiry2->fw.firmware_type, @@ -2167,7 +2167,7 @@ static ssize_t ctlr_num_show(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct myrb_hba *cb = shost_priv(shost); - return snprintf(buf, 20, "%d\n", cb->ctlr_num); + return snprintf(buf, 20, "%u\n", cb->ctlr_num); } static DEVICE_ATTR_RO(ctlr_num); @@ -2732,7 +2732,6 @@ static int DAC960_LA_hw_init(struct pci_dev *pdev, DAC960_LA_disable_intr(base); DAC960_LA_ack_hw_mbox_status(base); udelay(1000); - timeout = 0; while (DAC960_LA_init_in_progress(base) && timeout < MYRB_MAILBOX_TIMEOUT) { if (DAC960_LA_read_error_status(base, &error, diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 77c805db2724..3587f7c8a428 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -408,9 +408,10 @@ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, int offset; char *str = buf; int start = 0; + u32 ib_offset = pm8001_ha->ib_offset; #define IB_MEMMAP(c) \ (*(u32 *)((u8 *)pm8001_ha-> \ - memoryMap.region[IB].virt_ptr + \ + memoryMap.region[ib_offset].virt_ptr + \ pm8001_ha->evtlog_ib_offset + (c))) for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { @@ -442,9 +443,10 @@ static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev, int offset; char *str = buf; int start = 0; + u32 ob_offset = pm8001_ha->ob_offset; #define OB_MEMMAP(c) \ (*(u32 *)((u8 *)pm8001_ha-> \ - memoryMap.region[OB].virt_ptr + \ + memoryMap.region[ob_offset].virt_ptr + \ pm8001_ha->evtlog_ob_offset + (c))) for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index 1c7f15fd69ce..501b574239e8 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -75,12 +75,10 @@ enum port_type { }; /* driver compile-time configuration */ -#define PM8001_MAX_CCB 256 /* max ccbs supported */ +#define PM8001_MAX_CCB 1024 /* max ccbs supported */ #define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ -#define PM8001_MAX_INB_NUM 1 -#define PM8001_MAX_OUTB_NUM 1 -#define PM8001_MAX_SPCV_INB_NUM 1 -#define PM8001_MAX_SPCV_OUTB_NUM 4 +#define PM8001_MAX_INB_NUM 64 +#define PM8001_MAX_OUTB_NUM 64 #define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ /* Inbound/Outbound queue size */ @@ -92,26 +90,27 @@ enum port_type { #define PM8001_MAX_PORTS 16 /* max. 
possible ports */ #define PM8001_MAX_DEVICES 2048 /* max supported device */ #define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define PM8001_RESERVE_SLOT 8 -#define USI_MAX_MEMCNT_BASE 5 -#define IB (USI_MAX_MEMCNT_BASE + 1) -#define CI (IB + PM8001_MAX_SPCV_INB_NUM) -#define OB (CI + PM8001_MAX_SPCV_INB_NUM) -#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM) -#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM) #define CONFIG_SCSI_PM8001_MAX_DMA_SG 528 #define PM8001_MAX_DMA_SG CONFIG_SCSI_PM8001_MAX_DMA_SG + enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ IOP, /* IO processor */ NVMD, /* NVM device */ - DEV_MEM, /* memory for devices */ - CCB_MEM, /* memory for command control block */ FW_FLASH, /* memory for fw flash update */ - FORENSIC_MEM /* memory for fw forensic data */ + FORENSIC_MEM, /* memory for fw forensic data */ + USI_MAX_MEMCNT_BASE }; #define PM8001_EVENT_LOG_SIZE (128 * 1024) +/** + * maximum DMA memory regions(number of IBQ + number of IBQ CI + * + number of OBQ + number of OBQ PI) + */ +#define USI_MAX_MEMCNT (USI_MAX_MEMCNT_BASE + ((2 * PM8001_MAX_INB_NUM) \ + + (2 * PM8001_MAX_OUTB_NUM))) /*error code*/ enum mpi_err { MPI_IO_STATUS_SUCCESS = 0x0, diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index e9a939230b15..2b7b2954ec31 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -189,6 +189,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) u32 offsetib, offsetob; void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + u32 ib_offset = pm8001_ha->ib_offset; + u32 ob_offset = pm8001_ha->ob_offset; + u32 ci_offset = pm8001_ha->ci_offset; + u32 pi_offset = pm8001_ha->pi_offset; pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; @@ -223,19 +227,19 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr; pm8001_ha->inbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[IB + i].total_len; + pm8001_ha->memoryMap.region[ib_offset + i].total_len; pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = - pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = - pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].ci_virt = - pm8001_ha->memoryMap.region[CI + i].virt_ptr; + pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr; offsetib = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(addressib, @@ -249,21 +253,21 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); 
pm8001_ha->outbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr; pm8001_ha->outbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[OB + i].total_len; + pm8001_ha->memoryMap.region[ob_offset + i].total_len; pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = - pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = - pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = 0 | (10 << 16) | (i << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = - pm8001_ha->memoryMap.region[PI + i].virt_ptr; + pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr; offsetob = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(addressob, @@ -4371,8 +4375,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, /* fill in PRD (scatter/gather) table, if any */ if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr)); ssp_cmd.esgl = cpu_to_le32(1<<31); @@ -4445,8 +4448,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, /* fill in PRD (scatter/gather) table, if any */ if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; sata_cmd.addr_low = lower_32_bits(phys_addr); sata_cmd.addr_high = upper_32_bits(phys_addr); sata_cmd.esgl = cpu_to_le32(1 << 31); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 20fa96cbc9d3..3cf3e58b6979 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -56,6 +56,7 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n" " 8: Link rate 12.0G\n"); static struct scsi_transport_template *pm8001_stt; +static int pm8001_init_ccb_tag(struct pm8001_hba_info *, struct Scsi_Host *, struct pci_dev *); /* * chip info structure to identify chip key functionality as @@ -264,12 +265,36 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha); static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, const struct pci_device_id *ent) { - int i; + int i, count = 0, rc = 0; + u32 ci_offset, ib_offset, ob_offset, pi_offset; + struct inbound_queue_table *circularQ; + spin_lock_init(&pm8001_ha->lock); spin_lock_init(&pm8001_ha->bitmap_lock); PM8001_INIT_DBG(pm8001_ha, pm8001_printk("pm8001_alloc: PHY:%x\n", pm8001_ha->chip->n_phy)); + + /* Setup Interrupt */ + rc = pm8001_setup_irq(pm8001_ha); + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "pm8001_setup_irq failed [ret: %d]\n", rc)); + goto err_out_shost; + } + /* Request Interrupt */ + rc = pm8001_request_irq(pm8001_ha); + if (rc) + goto err_out_shost; + + count = pm8001_ha->max_q_num; + /* 
Queues are chosen based on the number of cores/msix availability */ + ib_offset = pm8001_ha->ib_offset = USI_MAX_MEMCNT_BASE; + ci_offset = pm8001_ha->ci_offset = ib_offset + count; + ob_offset = pm8001_ha->ob_offset = ci_offset + count; + pi_offset = pm8001_ha->pi_offset = ob_offset + count; + pm8001_ha->max_memcnt = pi_offset + count; + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { pm8001_phy_init(pm8001_ha, i); pm8001_ha->port[i].wide_port_phymap = 0; @@ -278,9 +303,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, INIT_LIST_HEAD(&pm8001_ha->port[i].list); } - pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL); - if (!pm8001_ha->tags) - goto err_out; /* MPI Memory region 1 for AAP Event Log for fw */ pm8001_ha->memoryMap.region[AAP1].num_elements = 1; pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE; @@ -293,54 +315,62 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; pm8001_ha->memoryMap.region[IOP].alignment = 32; - for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + for (i = 0; i < count; i++) { + circularQ = &pm8001_ha->inbnd_q_tbl[i]; + spin_lock_init(&circularQ->iq_lock); /* MPI Memory region 3 for consumer Index of inbound queues */ - pm8001_ha->memoryMap.region[CI+i].num_elements = 1; - pm8001_ha->memoryMap.region[CI+i].element_size = 4; - pm8001_ha->memoryMap.region[CI+i].total_len = 4; - pm8001_ha->memoryMap.region[CI+i].alignment = 4; + pm8001_ha->memoryMap.region[ci_offset+i].num_elements = 1; + pm8001_ha->memoryMap.region[ci_offset+i].element_size = 4; + pm8001_ha->memoryMap.region[ci_offset+i].total_len = 4; + pm8001_ha->memoryMap.region[ci_offset+i].alignment = 4; if ((ent->driver_data) != chip_8001) { /* MPI Memory region 5 inbound queues */ - pm8001_ha->memoryMap.region[IB+i].num_elements = + pm8001_ha->memoryMap.region[ib_offset+i].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[IB+i].element_size = 128; - pm8001_ha->memoryMap.region[IB+i].total_len = + pm8001_ha->memoryMap.region[ib_offset+i].element_size + = 128; + pm8001_ha->memoryMap.region[ib_offset+i].total_len = PM8001_MPI_QUEUE * 128; - pm8001_ha->memoryMap.region[IB+i].alignment = 128; + pm8001_ha->memoryMap.region[ib_offset+i].alignment + = 128; } else { - pm8001_ha->memoryMap.region[IB+i].num_elements = + pm8001_ha->memoryMap.region[ib_offset+i].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[IB+i].element_size = 64; - pm8001_ha->memoryMap.region[IB+i].total_len = + pm8001_ha->memoryMap.region[ib_offset+i].element_size + = 64; + pm8001_ha->memoryMap.region[ib_offset+i].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[IB+i].alignment = 64; + pm8001_ha->memoryMap.region[ib_offset+i].alignment = 64; } } - for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + for (i = 0; i < count; i++) { /* MPI Memory region 4 for producer Index of outbound queues */ - pm8001_ha->memoryMap.region[PI+i].num_elements = 1; - pm8001_ha->memoryMap.region[PI+i].element_size = 4; - pm8001_ha->memoryMap.region[PI+i].total_len = 4; - pm8001_ha->memoryMap.region[PI+i].alignment = 4; + pm8001_ha->memoryMap.region[pi_offset+i].num_elements = 1; + pm8001_ha->memoryMap.region[pi_offset+i].element_size = 4; + pm8001_ha->memoryMap.region[pi_offset+i].total_len = 4; + pm8001_ha->memoryMap.region[pi_offset+i].alignment = 4; if (ent->driver_data != chip_8001) { /* MPI Memory region 6 Outbound queues */ - pm8001_ha->memoryMap.region[OB+i].num_elements = + 
pm8001_ha->memoryMap.region[ob_offset+i].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[OB+i].element_size = 128; - pm8001_ha->memoryMap.region[OB+i].total_len = + pm8001_ha->memoryMap.region[ob_offset+i].element_size + = 128; + pm8001_ha->memoryMap.region[ob_offset+i].total_len = PM8001_MPI_QUEUE * 128; - pm8001_ha->memoryMap.region[OB+i].alignment = 128; + pm8001_ha->memoryMap.region[ob_offset+i].alignment + = 128; } else { /* MPI Memory region 6 Outbound queues */ - pm8001_ha->memoryMap.region[OB+i].num_elements = + pm8001_ha->memoryMap.region[ob_offset+i].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[OB+i].element_size = 64; - pm8001_ha->memoryMap.region[OB+i].total_len = + pm8001_ha->memoryMap.region[ob_offset+i].element_size + = 64; + pm8001_ha->memoryMap.region[ob_offset+i].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[OB+i].alignment = 64; + pm8001_ha->memoryMap.region[ob_offset+i].alignment = 64; } } @@ -348,19 +378,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, pm8001_ha->memoryMap.region[NVMD].num_elements = 1; pm8001_ha->memoryMap.region[NVMD].element_size = 4096; pm8001_ha->memoryMap.region[NVMD].total_len = 4096; - /* Memory region for devices*/ - pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1; - pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES * - sizeof(struct pm8001_device); - pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES * - sizeof(struct pm8001_device); - - /* Memory region for ccb_info*/ - pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1; - pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB * - sizeof(struct pm8001_ccb_info); - pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * - sizeof(struct pm8001_ccb_info); /* Memory region for fw flash */ pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; @@ -369,7 +386,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000; pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000; pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000; - for (i = 0; i < USI_MAX_MEMCNT; i++) { + for (i = 0; i < pm8001_ha->max_memcnt; i++) { if (pm8001_mem_alloc(pm8001_ha->pdev, &pm8001_ha->memoryMap.region[i].virt_ptr, &pm8001_ha->memoryMap.region[i].phys_addr, @@ -384,27 +401,36 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, } } - pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; + /* Memory region for devices*/ + pm8001_ha->devices = kzalloc(PM8001_MAX_DEVICES + * sizeof(struct pm8001_device), GFP_KERNEL); + if (!pm8001_ha->devices) { + rc = -ENOMEM; + goto err_out_nodev; + } for (i = 0; i < PM8001_MAX_DEVICES; i++) { pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; pm8001_ha->devices[i].id = i; pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; pm8001_ha->devices[i].running_req = 0; } - pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr; - for (i = 0; i < PM8001_MAX_CCB; i++) { - pm8001_ha->ccb_info[i].ccb_dma_handle = - pm8001_ha->memoryMap.region[CCB_MEM].phys_addr + - i * sizeof(struct pm8001_ccb_info); - pm8001_ha->ccb_info[i].task = NULL; - pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff; - pm8001_ha->ccb_info[i].device = NULL; - ++pm8001_ha->tags_num; - } pm8001_ha->flags = PM8001F_INIT_TIME; /* Initialize tags */ pm8001_tag_init(pm8001_ha); return 0; + +err_out_shost: + scsi_remove_host(pm8001_ha->shost); +err_out_nodev: + for (i = 0; i < 
pm8001_ha->max_memcnt; i++) { + if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) { + pci_free_consistent(pm8001_ha->pdev, + (pm8001_ha->memoryMap.region[i].total_len + + pm8001_ha->memoryMap.region[i].alignment), + pm8001_ha->memoryMap.region[i].virt_ptr, + pm8001_ha->memoryMap.region[i].phys_addr); + } + } err_out: return 1; } @@ -899,7 +925,8 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha) static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) { u32 number_of_intr; - int rc; + int rc, cpu_online_count; + unsigned int allocated_irq_vectors; /* SPCv controllers supports 64 msi-x */ if (pm8001_ha->chip_id == chip_8001) { @@ -908,13 +935,21 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) number_of_intr = PM8001_MAX_MSIX_VEC; } + cpu_online_count = num_online_cpus(); + number_of_intr = min_t(int, cpu_online_count, number_of_intr); rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr, number_of_intr, PCI_IRQ_MSIX); - number_of_intr = rc; + allocated_irq_vectors = rc; if (rc < 0) return rc; + + /* Assigns the number of interrupts */ + number_of_intr = min_t(int, allocated_irq_vectors, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; + /* Maximum queue number updating in HBA structure */ + pm8001_ha->max_q_num = number_of_intr; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( "pci_alloc_irq_vectors request ret:%d no of intr %d\n", rc, pm8001_ha->number_of_intr)); @@ -1069,13 +1104,6 @@ static int pm8001_pci_probe(struct pci_dev *pdev, rc = -ENOMEM; goto err_out_free; } - /* Setup Interrupt */ - rc = pm8001_setup_irq(pm8001_ha); - if (rc) { - PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( - "pm8001_setup_irq failed [ret: %d]\n", rc)); - goto err_out_shost; - } PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); @@ -1085,16 +1113,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev, goto err_out_ha_free; } + rc = pm8001_init_ccb_tag(pm8001_ha, shost, pdev); + if (rc) + goto err_out_enable; + rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_ha_free; - /* Request Interrupt */ - rc = pm8001_request_irq(pm8001_ha); - if (rc) { - PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( - "pm8001_request_irq failed [ret: %d]\n", rc)); - goto err_out_shost; - } PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); if (pm8001_ha->chip_id != chip_8001) { @@ -1137,6 +1162,60 @@ err_out_enable: return rc; } +/* + * pm8001_init_ccb_tag - allocate memory to CCB and tag. + * @pm8001_ha: our hba card information. + * @shost: scsi host which has been allocated outside. 
+ */ +static int +pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost, + struct pci_dev *pdev) +{ + int i = 0; + u32 max_out_io, ccb_count; + u32 can_queue; + + max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io; + ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io); + + /* Update to the scsi host*/ + can_queue = ccb_count - PM8001_RESERVE_SLOT; + shost->can_queue = can_queue; + + pm8001_ha->tags = kzalloc(ccb_count, GFP_KERNEL); + if (!pm8001_ha->tags) + goto err_out; + + /* Memory region for ccb_info*/ + pm8001_ha->ccb_info = (struct pm8001_ccb_info *) + kcalloc(ccb_count, sizeof(struct pm8001_ccb_info), GFP_KERNEL); + if (!pm8001_ha->ccb_info) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk + ("Unable to allocate memory for ccb\n")); + goto err_out_noccb; + } + for (i = 0; i < ccb_count; i++) { + pm8001_ha->ccb_info[i].buf_prd = pci_alloc_consistent(pdev, + sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG, + &pm8001_ha->ccb_info[i].ccb_dma_handle); + if (!pm8001_ha->ccb_info[i].buf_prd) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk + ("pm80xx: ccb prd memory allocation error\n")); + goto err_out; + } + pm8001_ha->ccb_info[i].task = NULL; + pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff; + pm8001_ha->ccb_info[i].device = NULL; + ++pm8001_ha->tags_num; + } + return 0; + +err_out_noccb: + kfree(pm8001_ha->devices); +err_out: + return -ENOMEM; +} + static void pm8001_pci_remove(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index ae7ba9b3c4bc..95663e138083 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -58,7 +58,7 @@ #include "pm8001_defs.h" #define DRV_NAME "pm80xx" -#define DRV_VERSION "0.1.39" +#define DRV_VERSION "0.1.40" #define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ #define PM8001_INIT_LOGGING 0x02 /* driver init logging */ #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ @@ -315,7 +315,7 @@ struct pm8001_ccb_info { u32 ccb_tag; dma_addr_t ccb_dma_handle; struct pm8001_device *device; - struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG]; + struct pm8001_prd *buf_prd; struct fw_control_ex *fw_control_context; u8 open_retry; }; @@ -468,6 +468,7 @@ struct inbound_queue_table { u32 reserved; __le32 consumer_index; u32 producer_idx; + spinlock_t iq_lock; }; struct outbound_queue_table { u32 element_size_cnt; @@ -524,8 +525,8 @@ struct pm8001_hba_info { void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */ union main_cfg_table main_cfg_tbl; union general_status_table gs_tbl; - struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; - struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM]; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; struct sas_phy_attribute_table phy_attr_table; /* MPI SAS PHY attributes */ u8 sas_addr[SAS_ADDR_SIZE]; @@ -561,6 +562,12 @@ struct pm8001_hba_info { u32 reset_in_progress; u32 non_fatal_count; u32 non_fatal_read_length; + u32 max_q_num; + u32 ib_offset; + u32 ob_offset; + u32 ci_offset; + u32 pi_offset; + u32 max_memcnt; }; struct pm8001_work { diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index b42f41d1ed49..7593f248afb2 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -720,7 +720,7 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { int i; void __iomem *address = 
pm8001_ha->inbnd_q_tbl_addr; - for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, @@ -738,7 +738,7 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { int i; void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; - for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { u32 offset = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(address, @@ -758,6 +758,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) u32 offsetib, offsetob; void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + u32 ib_offset = pm8001_ha->ib_offset; + u32 ob_offset = pm8001_ha->ob_offset; + u32 ci_offset = pm8001_ha->ci_offset; + u32 pi_offset = pm8001_ha->pi_offset; pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; @@ -778,23 +782,23 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) /* Disable end to end CRC checking */ pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); - for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; + pm8001_ha->memoryMap.region[ib_offset + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[ib_offset + i].virt_ptr; pm8001_ha->inbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[IB + i].total_len; + pm8001_ha->memoryMap.region[ib_offset + i].total_len; pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = - pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = - pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; + pm8001_ha->memoryMap.region[ci_offset + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].ci_virt = - pm8001_ha->memoryMap.region[CI + i].virt_ptr; + pm8001_ha->memoryMap.region[ci_offset + i].virt_ptr; offsetib = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(addressib, @@ -809,25 +813,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].pi_pci_bar, pm8001_ha->inbnd_q_tbl[i].pi_offset)); } - for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + for (i = 0; i < pm8001_ha->max_q_num; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; + pm8001_ha->memoryMap.region[ob_offset + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[ob_offset + i].virt_ptr; pm8001_ha->outbnd_q_tbl[i].total_length = - 
pm8001_ha->memoryMap.region[OB + i].total_len; + pm8001_ha->memoryMap.region[ob_offset + i].total_len; pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = - pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = - pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; + pm8001_ha->memoryMap.region[pi_offset + i].phys_addr_lo; /* interrupt vector based on oq */ pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = - pm8001_ha->memoryMap.region[PI + i].virt_ptr; + pm8001_ha->memoryMap.region[pi_offset + i].virt_ptr; offsetob = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(addressob, @@ -871,7 +875,7 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); /* Update Fatal error interrupt vector */ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= - ((pm8001_ha->number_of_intr - 1) << 8); + ((pm8001_ha->max_q_num - 1) << 8); pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); PM8001_DEV_DBG(pm8001_ha, pm8001_printk( @@ -1010,8 +1014,12 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) value &= SPCv_MSGU_CFG_TABLE_UPDATE; } while ((value != 0) && (--max_wait_count)); - if (!max_wait_count) - return -1; + if (!max_wait_count) { + /* additional check */ + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "Inb doorbell clear not toggled[value:%x]\n", value)); + return -EBUSY; + } /* check the MPI-State for initialization upto 100ms*/ max_wait_count = 100 * 1000;/* 100 msec */ do { @@ -1022,12 +1030,12 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) } while ((GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); if (!max_wait_count) - return -1; + return -EBUSY; /* check MPI Initialization error */ gst_len_mpistate = gst_len_mpistate >> 16; if (0x0000 != gst_len_mpistate) - return -1; + return -EBUSY; return 0; } @@ -1469,11 +1477,10 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) + for (i = 0; i < pm8001_ha->max_q_num; i++) { update_inbnd_queue_table(pm8001_ha, i); - for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) update_outbnd_queue_table(pm8001_ha, i); - + } /* notify firmware update finished and check initialization status */ if (0 == mpi_init_check(pm8001_ha)) { PM8001_INIT_DBG(pm8001_ha, @@ -4191,7 +4198,7 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) unsigned long flags; u32 regval; - if (vec == (pm8001_ha->number_of_intr - 1)) { + if (vec == (pm8001_ha->max_q_num - 1)) { regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); if ((regval & SCRATCH_PAD_MIPSALL_READY) != SCRATCH_PAD_MIPSALL_READY) { @@ -4274,6 +4281,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, char *preq_dma_addr = NULL; __le64 tmp_addr; u32 i, length; + unsigned long flags; memset(&smp_cmd, 0, sizeof(smp_cmd)); /* @@ -4369,8 +4377,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd, pm8001_ha->smp_exp_mode, length); + spin_lock_irqsave(&circularQ->iq_lock, flags); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd, sizeof(smp_cmd), 0); + 
spin_unlock_irqrestore(&circularQ->iq_lock, flags); if (rc) goto err_out_2; return 0; @@ -4434,7 +4444,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, u64 phys_addr, start_addr, end_addr; u32 end_addr_high, end_addr_low; struct inbound_queue_table *circularQ; - u32 q_index; + unsigned long flags; + u32 q_index, cpu_id; u32 opc = OPC_INB_SSPINIIOSTART; memset(&ssp_cmd, 0, sizeof(ssp_cmd)); memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); @@ -4453,7 +4464,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, task->ssp_task.cmd->cmd_len); - q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; + cpu_id = smp_processor_id(); + q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num); circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; /* Check if encryption is set */ @@ -4471,8 +4483,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; ssp_cmd.enc_addr_low = cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.enc_addr_high = @@ -4501,9 +4512,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, end_addr_high, end_addr_low)); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, - buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; ssp_cmd.enc_addr_low = cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.enc_addr_high = @@ -4531,8 +4540,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.addr_high = @@ -4560,9 +4568,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, end_addr_high, end_addr_low)); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, - buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.addr_high = @@ -4576,9 +4582,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.esgl = 0; } } - q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; + spin_lock_irqsave(&circularQ->iq_lock, flags); ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, sizeof(ssp_cmd), q_index); + spin_unlock_irqrestore(&circularQ->iq_lock, flags); return ret; } @@ -4590,7 +4597,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; u32 tag = ccb->ccb_tag; int ret; - u32 q_index; + u32 q_index, cpu_id; struct sata_start_req sata_cmd; u32 hdr_tag, ncg_tag = 0; u64 phys_addr, start_addr, end_addr; @@ -4601,7 +4608,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); - q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; + cpu_id = smp_processor_id(); + q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num); circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; 
if (task->data_dir == DMA_NONE) { @@ -4652,8 +4660,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; sata_cmd.enc_addr_low = lower_32_bits(phys_addr); sata_cmd.enc_addr_high = upper_32_bits(phys_addr); sata_cmd.enc_esgl = cpu_to_le32(1 << 31); @@ -4678,9 +4685,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, end_addr_high, end_addr_low)); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, - buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; sata_cmd.enc_addr_low = lower_32_bits(phys_addr); sata_cmd.enc_addr_high = @@ -4718,8 +4723,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; sata_cmd.addr_low = lower_32_bits(phys_addr); sata_cmd.addr_high = upper_32_bits(phys_addr); sata_cmd.esgl = cpu_to_le32(1 << 31); @@ -4744,9 +4748,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, end_addr_high, end_addr_low)); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); - phys_addr = ccb->ccb_dma_handle + - offsetof(struct pm8001_ccb_info, - buf_prd[0]); + phys_addr = ccb->ccb_dma_handle; sata_cmd.addr_low = lower_32_bits(phys_addr); sata_cmd.addr_high = @@ -4817,9 +4819,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, } } } - q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; + spin_lock_irqsave(&circularQ->iq_lock, flags); ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, sizeof(sata_cmd), q_index); + spin_unlock_irqrestore(&circularQ->iq_lock, flags); return ret; } diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index f89ad3274412..d5ebcf7d70ff 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -170,20 +170,7 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) return 0; } -static int -qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file) -{ - scsi_qla_host_t *vha = inode->i_private; - - return single_open(file, qla2x00_dfs_tgt_sess_show, vha); -} - -static const struct file_operations dfs_tgt_sess_ops = { - .open = qla2x00_dfs_tgt_sess_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess); static int qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) @@ -239,20 +226,7 @@ out_free_id_list: return 0; } -static int -qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file) -{ - scsi_qla_host_t *vha = inode->i_private; - - return single_open(file, qla2x00_dfs_tgt_port_database_show, vha); -} - -static const struct file_operations dfs_tgt_port_database_ops = { - .open = qla2x00_dfs_tgt_port_database_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database); static int qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) @@ -301,20 +275,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) return 0; } -static int -qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file) -{ - 
struct scsi_qla_host *vha = inode->i_private; - - return single_open(file, qla_dfs_fw_resource_cnt_show, vha); -} - -static const struct file_operations dfs_fw_resource_cnt_ops = { - .open = qla_dfs_fw_resource_cnt_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt); static int qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) @@ -391,20 +352,7 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) return 0; } -static int -qla_dfs_tgt_counters_open(struct inode *inode, struct file *file) -{ - struct scsi_qla_host *vha = inode->i_private; - - return single_open(file, qla_dfs_tgt_counters_show, vha); -} - -static const struct file_operations dfs_tgt_counters_ops = { - .open = qla_dfs_tgt_counters_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters); static int qla2x00_dfs_fce_show(struct seq_file *s, void *unused) @@ -606,19 +554,19 @@ create_dir: create_nodes: ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count", - S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops); + S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops); ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR, - ha->dfs_dir, vha, &dfs_tgt_counters_ops); + ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops); ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", - S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); + S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops); ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess", - S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops); + S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops); if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) { ha->tgt.dfs_naqp = debugfs_create_file("naqp", diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index a39b1a885053..a24b82de4aab 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1838,6 +1838,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct mbx_24xx_entry *pkt) { const char func[] = "MBX-IOCB2"; + struct qla_hw_data *ha = vha->hw; srb_t *sp; struct srb_iocb *si; u16 sz, i; @@ -1847,6 +1848,18 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, if (!sp) return; + if (sp->type == SRB_SCSI_CMD || + sp->type == SRB_NVME_CMD || + sp->type == SRB_TM_CMD) { + ql_log(ql_log_warn, vha, 0x509d, + "Inconsistent event entry type %d\n", sp->type); + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + return; + } + si = &sp->u.iocb_cmd; sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); @@ -3399,32 +3412,6 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, sp->done(sp, comp_status); } -static void qla24xx_process_mbx_iocb_response(struct scsi_qla_host *vha, - struct rsp_que *rsp, struct sts_entry_24xx *pkt) -{ - struct qla_hw_data *ha = vha->hw; - srb_t *sp; - static const char func[] = "MBX-IOCB2"; - - sp = qla2x00_get_sp_from_handle(vha, func, rsp->req, pkt); - if (!sp) - return; - - if (sp->type == SRB_SCSI_CMD || - sp->type == SRB_NVME_CMD || - sp->type == SRB_TM_CMD) { - ql_log(ql_log_warn, vha, 0x509d, - "Inconsistent event entry type %d\n", sp->type); - if (IS_P3P_TYPE(ha)) - set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); - else - 
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - return; - } - - qla24xx_mbx_iocb_entry(vha, rsp->req, (struct mbx_24xx_entry *)pkt); -} - /** * qla24xx_process_response_queue() - Process response queue entries. * @vha: SCSI driver HA context @@ -3534,7 +3521,8 @@ process_err: (struct abort_entry_24xx *)pkt); break; case MBX_IOCB_TYPE: - qla24xx_process_mbx_iocb_response(vha, rsp, pkt); + qla24xx_mbx_iocb_entry(vha, rsp->req, + (struct mbx_24xx_entry *)pkt); break; case VP_CTRL_IOCB_TYPE: qla_ctrlvp_completed(vha, rsp->req, diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index b4f22dbe7175..1f9005125313 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -541,7 +541,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, fc_port_t *fcport; struct srb_iocb *nvme; struct scsi_qla_host *vha; - int rval = -ENODEV; + int rval; srb_t *sp; struct qla_qpair *qpair = hw_queue_handle; struct nvme_private *priv = fd->private; @@ -549,19 +549,19 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, if (!priv) { /* nvme association has been torn down */ - return rval; + return -ENODEV; } fcport = qla_rport->fcport; if (!qpair || !fcport || (qpair && !qpair->fw_started) || (fcport && fcport->deleted)) - return rval; + return -ENODEV; vha = fcport->vha; if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) - return rval; + return -ENODEV; if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || (qpair && !qpair->fw_started) || fcport->deleted) diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 3a415b12dcec..01ccd4526707 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -659,7 +659,7 @@ static int qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr, int duration, uint32_t test_mask, uint32_t test_result) { - uint32_t value; + uint32_t value = 0; int timeout_error; uint8_t retries; int ret_val = QLA_SUCCESS; diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index fd3aabb6a190..f1767b21076f 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -3225,7 +3225,7 @@ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code) switch (code) { case QL4_UEVENT_CODE_FW_DUMP: - snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", + snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", ha->host_no); break; default: diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 97ff31ed2a44..ab676ce2d051 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -293,21 +293,6 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, } EXPORT_SYMBOL(__scsi_execute); -/** - * scsi_init_cmd_errh - Initialize cmd fields related to error handling. - * @cmd: command that is ready to be queued. - * - * This function has the job of initializing a number of fields related to error - * handling. Typically this will be called once for each command, as required. - */ -static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) -{ - scsi_set_resid(cmd, 0); - memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - if (cmd->cmd_len == 0) - cmd->cmd_len = scsi_command_size(cmd->cmnd); -} - /* * Wake up the error handler if necessary. 
Avoid as follows that the error * handler is not woken up if host in-flight requests number == @@ -530,7 +515,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) } } -static void scsi_free_sgtables(struct scsi_cmnd *cmd) +void scsi_free_sgtables(struct scsi_cmnd *cmd) { if (cmd->sdb.table.nents) sg_free_table_chained(&cmd->sdb.table, @@ -539,6 +524,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd) sg_free_table_chained(&cmd->prot_sdb->table, SCSI_INLINE_PROT_SG_CNT); } +EXPORT_SYMBOL_GPL(scsi_free_sgtables); static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) { @@ -998,7 +984,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, } /** - * scsi_init_io - SCSI I/O initialization function. + * scsi_alloc_sgtables - allocate S/G tables for a command * @cmd: command descriptor we wish to initialize * * Returns: @@ -1006,7 +992,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, * * BLK_STS_RESOURCE - if the failure is retryable * * BLK_STS_IOERR - if the failure is fatal */ -blk_status_t scsi_init_io(struct scsi_cmnd *cmd) +blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct request *rq = cmd->request; @@ -1098,7 +1084,7 @@ out_free_sgtables: scsi_free_sgtables(cmd); return ret; } -EXPORT_SYMBOL(scsi_init_io); +EXPORT_SYMBOL(scsi_alloc_sgtables); /** * scsi_initialize_rq - initialize struct scsi_cmnd partially @@ -1186,7 +1172,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, * submit a request without an attached bio. */ if (req->bio) { - blk_status_t ret = scsi_init_io(cmd); + blk_status_t ret = scsi_alloc_sgtables(cmd); if (unlikely(ret != BLK_STS_OK)) return ret; } else { @@ -1196,58 +1182,16 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, } cmd->cmd_len = scsi_req(req)->cmd_len; + if (cmd->cmd_len == 0) + cmd->cmd_len = scsi_command_size(cmd->cmnd); cmd->cmnd = scsi_req(req)->cmd; cmd->transfersize = blk_rq_bytes(req); cmd->allowed = scsi_req(req)->retries; return BLK_STS_OK; } -/* - * Setup a normal block command. These are simple request from filesystems - * that still need to be translated to SCSI CDBs from the ULD. 
- */ -static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev, - struct request *req) -{ - struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); - - if (unlikely(sdev->handler && sdev->handler->prep_fn)) { - blk_status_t ret = sdev->handler->prep_fn(sdev, req); - if (ret != BLK_STS_OK) - return ret; - } - - cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd; - memset(cmd->cmnd, 0, BLK_MAX_CDB); - return scsi_cmd_to_driver(cmd)->init_command(cmd); -} - -static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev, - struct request *req) -{ - struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); - blk_status_t ret; - - if (!blk_rq_bytes(req)) - cmd->sc_data_direction = DMA_NONE; - else if (rq_data_dir(req) == WRITE) - cmd->sc_data_direction = DMA_TO_DEVICE; - else - cmd->sc_data_direction = DMA_FROM_DEVICE; - - if (blk_rq_is_scsi(req)) - ret = scsi_setup_scsi_cmnd(sdev, req); - else - ret = scsi_setup_fs_cmnd(sdev, req); - - if (ret != BLK_STS_OK) - scsi_free_sgtables(cmd); - - return ret; -} - static blk_status_t -scsi_prep_state_check(struct scsi_device *sdev, struct request *req) +scsi_device_state_check(struct scsi_device *sdev, struct request *req) { switch (sdev->sdev_state) { case SDEV_OFFLINE: @@ -1589,7 +1533,7 @@ static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) sizeof(struct scatterlist); } -static blk_status_t scsi_mq_prep_fn(struct request *req) +static blk_status_t scsi_prepare_cmd(struct request *req) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); struct scsi_device *sdev = req->q->queuedata; @@ -1601,6 +1545,10 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) cmd->request = req; cmd->tag = req->tag; cmd->prot_op = SCSI_PROT_NORMAL; + if (blk_rq_bytes(req)) + cmd->sc_data_direction = rq_dma_dir(req); + else + cmd->sc_data_direction = DMA_NONE; sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; cmd->sdb.table.sgl = sg; @@ -1612,9 +1560,23 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) (struct scatterlist *)(cmd->prot_sdb + 1); } - blk_mq_start_request(req); + /* + * Special handling for passthrough commands, which don't go to the ULP + * at all: + */ + if (blk_rq_is_scsi(req)) + return scsi_setup_scsi_cmnd(sdev, req); + + if (sdev->handler && sdev->handler->prep_fn) { + blk_status_t ret = sdev->handler->prep_fn(sdev, req); - return scsi_setup_cmnd(sdev, req); + if (ret != BLK_STS_OK) + return ret; + } + + cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd; + memset(cmd->cmnd, 0, BLK_MAX_CDB); + return scsi_cmd_to_driver(cmd)->init_command(cmd); } static void scsi_mq_done(struct scsi_cmnd *cmd) @@ -1680,7 +1642,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, * commands. 
*/ if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { - ret = scsi_prep_state_check(sdev, req); + ret = scsi_device_state_check(sdev, req); if (ret != BLK_STS_OK) goto out_put_budget; } @@ -1692,13 +1654,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, goto out_dec_target_busy; if (!(req->rq_flags & RQF_DONTPREP)) { - ret = scsi_mq_prep_fn(req); + ret = scsi_prepare_cmd(req); if (ret != BLK_STS_OK) goto out_dec_host_busy; req->rq_flags |= RQF_DONTPREP; } else { clear_bit(SCMD_STATE_COMPLETE, &cmd->state); - blk_mq_start_request(req); } cmd->flags &= SCMD_PRESERVED_FLAGS; @@ -1707,9 +1668,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, if (bd->last) cmd->flags |= SCMD_LAST; - scsi_init_cmd_errh(cmd); + scsi_set_resid(cmd, 0); + memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); cmd->scsi_done = scsi_mq_done; + blk_mq_start_request(req); reason = scsi_dispatch_cmd(cmd); if (reason) { scsi_set_blocked(cmd, reason); @@ -1970,7 +1933,6 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q) return sdev; } -EXPORT_SYMBOL_GPL(scsi_device_from_queue); /** * scsi_block_requests - Utility function used by low-level drivers to prevent diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 83c4d95756a9..656bcf4940d6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -902,7 +902,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) cmd->transfersize = data_len; rq->timeout = SD_TIMEOUT; - return scsi_init_io(cmd); + return scsi_alloc_sgtables(cmd); } static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, @@ -934,7 +934,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, cmd->transfersize = data_len; rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; - return scsi_init_io(cmd); + return scsi_alloc_sgtables(cmd); } static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, @@ -966,7 +966,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, cmd->transfersize = data_len; rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; - return scsi_init_io(cmd); + return scsi_alloc_sgtables(cmd); } static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) @@ -1107,7 +1107,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) * knows how much to actually write. */ rq->__data_len = sdp->sector_size; - ret = scsi_init_io(cmd); + ret = scsi_alloc_sgtables(cmd); rq->__data_len = blk_rq_bytes(rq); return ret; @@ -1226,23 +1226,24 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) unsigned int dif; bool dix; - ret = scsi_init_io(cmd); + ret = scsi_alloc_sgtables(cmd); if (ret != BLK_STS_OK) return ret; + ret = BLK_STS_IOERR; if (!scsi_device_online(sdp) || sdp->changed) { scmd_printk(KERN_ERR, cmd, "device offline or changed\n"); - return BLK_STS_IOERR; + goto fail; } if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) { scmd_printk(KERN_ERR, cmd, "access beyond end of device\n"); - return BLK_STS_IOERR; + goto fail; } if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n"); - return BLK_STS_IOERR; + goto fail; } /* @@ -1264,7 +1265,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) if (req_op(rq) == REQ_OP_ZONE_APPEND) { ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks); if (ret) - return ret; + goto fail; } fua = rq->cmd_flags & REQ_FUA ? 
0x8 : 0;
@@ -1292,7 +1293,7 @@
 }
 if (unlikely(ret != BLK_STS_OK))
- return ret;
+ goto fail;
 /*
 * We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -1316,10 +1317,12 @@
 blk_rq_sectors(rq)));
 /*
- * This indicates that the command is ready from our end to be
- * queued.
+ * This indicates that the command is ready from our end to be queued.
 */
 return BLK_STS_OK;
+fail:
+ scsi_free_sgtables(cmd);
+ return ret;
 }
 static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c
index 4c8e64e4fba6..3455dd7e73f4 100644
--- a/drivers/scsi/snic/vnic_cq.c
+++ b/drivers/scsi/snic/vnic_cq.c
@@ -31,8 +31,6 @@ void svnic_cq_free(struct vnic_cq *cq)
 int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
 unsigned int index, unsigned int desc_count, unsigned int desc_size)
 {
- int err;
-
 cq->index = index;
 cq->vdev = vdev;
@@ -43,11 +41,7 @@ int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
 return -EINVAL;
 }
- err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
- if (err)
- return err;
-
- return 0;
+ return svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
 }
 void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 2b43c0f97442..fd4b582110b2 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -392,15 +392,11 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
 struct request *rq = SCpnt->request;
 blk_status_t ret;
- ret = scsi_init_io(SCpnt);
+ ret = scsi_alloc_sgtables(SCpnt);
 if (ret != BLK_STS_OK)
- goto out;
+ return ret;
 cd = scsi_cd(rq->rq_disk);
- /* from here on until we're complete, any goto out
- * is used for a killable error condition */
- ret = BLK_STS_IOERR;
-
 SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
 "Doing sr request, block = %d\n", block));
@@ -507,14 +503,15 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
 SCpnt->transfersize = cd->device->sector_size;
 SCpnt->underflow = this_count << 9;
 SCpnt->allowed = MAX_RETRIES;
+ SCpnt->cmd_len = 10;
 /*
- * This indicates that the command is ready from our end to be
- * queued.
+ * This indicates that the command is ready from our end to be queued.
 */
- ret = BLK_STS_OK;
+ return BLK_STS_OK;
 out:
- return ret;
+ scsi_free_sgtables(SCpnt);
+ return BLK_STS_IOERR;
 }
 static void sr_revalidate_disk(struct scsi_cd *cd)
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index cc11daa1222b..a9fe092a4906 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5656,7 +5656,7 @@ int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram
 /*
 * Allocate the array of lists of CCBs hashed by DSA.
 */
- np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
+ np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(*np->ccbh), GFP_KERNEL);
 if (!np->ccbh)
 goto attach_failed;
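
The pm80xx hunks above replace the device-id based inbound-queue hash with the submitting CPU and serialize each inbound queue with its own iq_lock. Below is a minimal sketch of that submission pattern; apart from smp_processor_id() and the spinlock primitives, all names (sketch_iq, sketch_submit, the toy ring layout) are illustrative and are not the driver's real API.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/smp.h>

struct sketch_iq {
	spinlock_t lock;	/* mirrors the iq_lock added to inbound_queue_table */
	u32 producer_idx;	/* toy stand-in for the MPI ring state */
	u8 ring[64][128];
};

/* Pick a queue by submitting CPU (as the new q_index computation does) and
 * serialize the ring update with the per-queue lock. */
static int sketch_submit(struct sketch_iq *queues, u32 max_q_num,
			 const void *cmd, size_t len)
{
	u32 q_index = smp_processor_id() % max_q_num;
	struct sketch_iq *q = &queues[q_index];
	unsigned long flags;

	if (len > sizeof(q->ring[0]))
		return -EINVAL;

	spin_lock_irqsave(&q->lock, flags);
	memcpy(q->ring[q->producer_idx % 64], cmd, len);
	q->producer_idx++;
	spin_unlock_irqrestore(&q->lock, flags);
	return 0;
}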
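The reworked pm8001_setup_msix() caps the MSI-X request at the number of online CPUs and then derives the queue count (max_q_num) from whatever the PCI core actually granted. A short sketch of that sizing logic, assuming a hypothetical SKETCH_HW_MAX_VEC in place of the controller's real vector limit:

#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

#define SKETCH_HW_MAX_VEC 64	/* stand-in for the controller's MSI-X limit */

/* Request no more MSI-X vectors than online CPUs, then size the queue count
 * from what pci_alloc_irq_vectors() actually returned. */
static int sketch_setup_msix(struct pci_dev *pdev, u32 *nr_queues)
{
	u32 want = min_t(u32, num_online_cpus(), SKETCH_HW_MAX_VEC);
	int got = pci_alloc_irq_vectors(pdev, want, want, PCI_IRQ_MSIX);

	if (got < 0)
		return got;
	*nr_queues = min_t(u32, got, want);
	return 0;
}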
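The qla_dfs.c hunks drop the hand-written single_open()/file_operations boilerplate in favor of DEFINE_SHOW_ATTRIBUTE(), which generates <name>_fops from <name>_show(). A sketch of the general pattern with an illustrative "demo" attribute (not one of the qla2xxx nodes):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* DEFINE_SHOW_ATTRIBUTE(demo) generates demo_open() and demo_fops from
 * demo_show(); the data pointer passed to debugfs_create_file() arrives
 * here as s->private (it is inode->i_private under the hood). */
static int demo_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "private data at %p\n", s->private);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

static void demo_register(struct dentry *dir, void *priv)
{
	debugfs_create_file("demo", 0400, dir, priv, &demo_fops);
}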
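The scsi_lib.c, sd.c and sr.c hunks rename scsi_init_io() to scsi_alloc_sgtables(), export scsi_free_sgtables(), and make the ULD .init_command paths unwind the S/G tables themselves on failure. A sketch of that goto-fail shape follows; sketch_validate() and sketch_init_command() are placeholders for the driver-specific checks and setup, not functions from the patch.

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

static bool sketch_validate(struct scsi_cmnd *cmd)
{
	return scsi_device_online(cmd->device);	/* placeholder check */
}

/* Once scsi_alloc_sgtables() has succeeded, every failure exit must call
 * scsi_free_sgtables(), as the reworked sd_setup_read_write_cmnd() and
 * sr_init_command() above now do. */
static blk_status_t sketch_init_command(struct scsi_cmnd *cmd)
{
	blk_status_t ret = scsi_alloc_sgtables(cmd);

	if (ret != BLK_STS_OK)
		return ret;		/* nothing was set up yet */

	ret = BLK_STS_IOERR;
	if (!sketch_validate(cmd))
		goto fail;

	/* ... fill in the CDB, transfersize, retries here ... */
	return BLK_STS_OK;

fail:
	scsi_free_sgtables(cmd);
	return ret;
}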