Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/53c7xx.c | 8
-rw-r--r--  drivers/scsi/NCR53C9x.c | 18
-rw-r--r--  drivers/scsi/NCR_D700.c | 14
-rw-r--r--  drivers/scsi/aha152x.c | 43
-rw-r--r--  drivers/scsi/ahci.c | 27
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 21
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 1
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/Makefile | 2
-rw-r--r--  drivers/scsi/arm/Kconfig | 3
-rw-r--r--  drivers/scsi/arm/fas216.c | 2
-rw-r--r--  drivers/scsi/arm/scsi.h | 2
-rw-r--r--  drivers/scsi/ata_piix.c | 232
-rw-r--r--  drivers/scsi/atari_NCR5380.c | 2
-rw-r--r--  drivers/scsi/constants.c | 126
-rw-r--r--  drivers/scsi/esp.c | 19
-rw-r--r--  drivers/scsi/hptiop.c | 568
-rw-r--r--  drivers/scsi/ibmvscsi/iseries_vscsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 1
-rw-r--r--  drivers/scsi/ide-scsi.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 209
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 2
-rw-r--r--  drivers/scsi/jazz_esp.c | 2
-rw-r--r--  drivers/scsi/libata-core.c | 325
-rw-r--r--  drivers/scsi/libata-eh.c | 464
-rw-r--r--  drivers/scsi/libata-scsi.c | 137
-rw-r--r--  drivers/scsi/libiscsi.c | 214
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 190
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 86
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 72
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 35
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 85
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 112
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 2
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_ioctl.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 42
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.h | 4
-rw-r--r--  drivers/scsi/mesh.c | 2
-rw-r--r--  drivers/scsi/pdc_adma.c | 3
-rw-r--r--  drivers/scsi/pluto.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qlogicpti.c | 4
-rw-r--r--  drivers/scsi/sata_promise.c | 7
-rw-r--r--  drivers/scsi/sata_sil.c | 105
-rw-r--r--  drivers/scsi/sata_sil24.c | 135
-rw-r--r--  drivers/scsi/sata_via.c | 117
-rw-r--r--  drivers/scsi/sata_vsc.c | 2
-rw-r--r--  drivers/scsi/scsi.c | 11
-rw-r--r--  drivers/scsi/scsi_debug.c | 72
-rw-r--r--  drivers/scsi/scsi_error.c | 218
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 88
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 15
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 64
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/seagate.c | 2
-rw-r--r--  drivers/scsi/sg.c | 8
-rw-r--r--  drivers/scsi/sr.c | 5
-rw-r--r--  drivers/scsi/st.c | 7
-rw-r--r--  drivers/scsi/sun3_NCR5380.c | 2
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 2
-rw-r--r--  drivers/scsi/wd33c93.c | 2
81 files changed, 2316 insertions, 1786 deletions
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
index c690c2b89e41..acf292736b4e 100644
--- a/drivers/scsi/53c7xx.c
+++ b/drivers/scsi/53c7xx.c
@@ -3451,12 +3451,12 @@ create_cmd (Scsi_Cmnd *cmd) {
for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
cmd_dataout += 4, ++i) {
u32 vbuf = cmd->use_sg
- ? (u32)page_address(((struct scatterlist *)cmd->buffer)[i].page)+
- ((struct scatterlist *)cmd->buffer)[i].offset
+ ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
+ ((struct scatterlist *)cmd->request_buffer)[i].offset
: (u32)(cmd->request_buffer);
u32 bbuf = virt_to_bus((void *)vbuf);
u32 count = cmd->use_sg ?
- ((struct scatterlist *)cmd->buffer)[i].length :
+ ((struct scatterlist *)cmd->request_buffer)[i].length :
cmd->request_bufflen;
/*
@@ -5417,7 +5417,7 @@ insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
if ((buffers = cmd->use_sg)) {
for (offset = 0,
- segment = (struct scatterlist *) cmd->buffer;
+ segment = (struct scatterlist *) cmd->request_buffer;
buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
(ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
--buffers, offset += segment->length, ++segment)
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 8a4659e94105..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -911,7 +911,7 @@ static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
sp->SCp.ptr =
(char *) virt_to_phys(sp->request_buffer);
} else {
- sp->SCp.buffer = (struct scatterlist *) sp->buffer;
+ sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
sp->SCp.buffers_residual = sp->use_sg - 1;
sp->SCp.this_residual = sp->SCp.buffer->length;
if (esp->dma_mmu_get_scsi_sgl)
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
*/
static int esp_should_clear_sync(Scsi_Cmnd *sp)
{
- unchar cmd1 = sp->cmnd[0];
- unchar cmd2 = sp->data_cmnd[0];
+ unchar cmd = sp->cmnd[0];
/* These cases are for spinning up a disk and
* waiting for that spinup to complete.
*/
- if(cmd1 == START_STOP ||
- cmd2 == START_STOP)
+ if(cmd == START_STOP)
return 0;
- if(cmd1 == TEST_UNIT_READY ||
- cmd2 == TEST_UNIT_READY)
+ if(cmd == TEST_UNIT_READY)
return 0;
/* One more special case for SCSI tape drives,
* this is what is used to probe the device for
* completion of a rewind or tape load operation.
*/
- if(sp->device->type == TYPE_TAPE) {
- if(cmd1 == MODE_SENSE ||
- cmd2 == MODE_SENSE)
- return 0;
- }
+ if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
+ return 0;
return 1;
}
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index a06f547e87f7..d05681f9d81a 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
MODULE_LICENSE("GPL");
module_param(NCR_D700, charp, 0);
-static __u8 __initdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
+static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
{ [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
#ifdef MODULE
@@ -173,7 +173,7 @@ struct NCR_D700_private {
char pad;
};
-static int
+static int __devinit
NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
int slot, u32 region, int differential)
{
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data, struct pt_regs *regs)
* essentially connectecd to the MCA bus independently, it is easier
* to set them up as two separate host adapters, rather than one
* adapter with two channels */
-static int
+static int __devinit
NCR_D700_probe(struct device *dev)
{
struct NCR_D700_private *p;
@@ -329,7 +329,7 @@ NCR_D700_probe(struct device *dev)
for (i = 0; i < 2; i++) {
int err;
- if ((err = NCR_D700_probe_one(p, i, slot, irq,
+ if ((err = NCR_D700_probe_one(p, i, irq, slot,
offset_addr + (0x80 * i),
differential)) != 0)
printk("D700: SIOP%d: probe failed, error = %d\n",
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
return 0;
}
-static void
+static void __devexit
NCR_D700_remove_one(struct Scsi_Host *host)
{
scsi_remove_host(host);
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
release_region(host->base, 64);
}
-static int
+static int __devexit
NCR_D700_remove(struct device *dev)
{
struct NCR_D700_private *p = dev_get_drvdata(dev);
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
.name = "NCR_D700",
.bus = &mca_bus_type,
.probe = NCR_D700_probe,
- .remove = NCR_D700_remove,
+ .remove = __devexit_p(NCR_D700_remove),
},
};
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 36e63f82d9f8..f974869ea323 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -551,6 +551,11 @@ struct aha152x_hostdata {
struct aha152x_scdata {
Scsi_Cmnd *next; /* next sc in queue */
struct semaphore *sem; /* semaphore to block on */
+ unsigned char cmd_len;
+ unsigned char cmnd[MAX_COMMAND_SIZE];
+ unsigned short use_sg;
+ unsigned request_bufflen;
+ void *request_buffer;
};
@@ -1006,11 +1011,20 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
return FAILED;
}
} else {
+ struct aha152x_scdata *sc;
+
SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
if(SCpnt->host_scribble==0) {
printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
return FAILED;
}
+
+ sc = SCDATA(SCpnt);
+ memcpy(sc->cmnd, SCpnt->cmnd, sizeof(sc->cmnd));
+ sc->request_buffer = SCpnt->request_buffer;
+ sc->request_bufflen = SCpnt->request_bufflen;
+ sc->use_sg = SCpnt->use_sg;
+ sc->cmd_len = SCpnt->cmd_len;
}
SCNEXT(SCpnt) = NULL;
@@ -1165,6 +1179,10 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
DECLARE_MUTEX_LOCKED(sem);
struct timer_list timer;
int ret, issued, disconnected;
+ unsigned char old_cmd_len = SCpnt->cmd_len;
+ unsigned short old_use_sg = SCpnt->use_sg;
+ void *old_buffer = SCpnt->request_buffer;
+ unsigned old_bufflen = SCpnt->request_bufflen;
unsigned long flags;
#if defined(AHA152X_DEBUG)
@@ -1198,11 +1216,11 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
add_timer(&timer);
down(&sem);
del_timer(&timer);
-
- SCpnt->cmd_len = SCpnt->old_cmd_len;
- SCpnt->use_sg = SCpnt->old_use_sg;
- SCpnt->request_buffer = SCpnt->buffer;
- SCpnt->request_bufflen = SCpnt->bufflen;
+
+ SCpnt->cmd_len = old_cmd_len;
+ SCpnt->use_sg = old_use_sg;
+ SCpnt->request_buffer = old_buffer;
+ SCpnt->request_bufflen = old_bufflen;
DO_LOCK(flags);
@@ -1565,6 +1583,9 @@ static void busfree_run(struct Scsi_Host *shpnt)
#endif
if(DONE_SC->SCp.phase & check_condition) {
+ struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
+ struct aha152x_scdata *sc = SCDATA(cmd);
+
#if 0
if(HOSTDATA(shpnt)->debug & debug_eh) {
printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC));
@@ -1573,13 +1594,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
#endif
/* restore old command */
- memcpy((void *) DONE_SC->cmnd, (void *) DONE_SC->data_cmnd, sizeof(DONE_SC->data_cmnd));
- DONE_SC->request_buffer = DONE_SC->buffer;
- DONE_SC->request_bufflen = DONE_SC->bufflen;
- DONE_SC->use_sg = DONE_SC->old_use_sg;
- DONE_SC->cmd_len = DONE_SC->old_cmd_len;
+ memcpy(cmd->cmnd, sc->cmnd, sizeof(sc->cmnd));
+ cmd->request_buffer = sc->request_buffer;
+ cmd->request_bufflen = sc->request_bufflen;
+ cmd->use_sg = sc->use_sg;
+ cmd->cmd_len = sc->cmd_len;
- DONE_SC->SCp.Status = 0x02;
+ cmd->SCp.Status = 0x02;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 15f6cd4279b7..904c25fb4ba4 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -940,14 +940,8 @@ static void ahci_host_intr(struct ata_port *ap)
return;
/* ignore interim PIO setup fis interrupts */
- if (ata_tag_valid(ap->active_tag)) {
- struct ata_queued_cmd *qc =
- ata_qc_from_tag(ap, ap->active_tag);
-
- if (qc && qc->tf.protocol == ATA_PROT_PIO &&
- (status & PORT_IRQ_PIOS_FIS))
- return;
- }
+ if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
+ return;
if (ata_ratelimit())
ata_port_printk(ap, KERN_INFO, "spurious interrupt "
@@ -1052,7 +1046,7 @@ static void ahci_thaw(struct ata_port *ap)
static void ahci_error_handler(struct ata_port *ap)
{
- if (!(ap->flags & ATA_FLAG_FROZEN)) {
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
ahci_stop_engine(ap);
ahci_start_engine(ap);
@@ -1323,6 +1317,17 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ /* JMicron-specific fixup: make sure we're in AHCI mode */
+ /* This is protected from races with ata_jmicron by the pci probe
+ locking */
+ if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
+ /* AHCI enable, AHCI on function 0 */
+ pci_write_config_byte(pdev, 0x41, 0xa1);
+ /* Function 1 is the PATA controller */
+ if (PCI_FUNC(pdev->devfn))
+ return -ENODEV;
+ }
+
rc = pci_enable_device(pdev);
if (rc)
return rc;
@@ -1378,10 +1383,6 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (have_msi)
hpriv->flags |= AHCI_FLAG_MSI;
- /* JMicron-specific fixup: make sure we're in AHCI mode */
- if (pdev->vendor == 0x197b)
- pci_write_config_byte(pdev, 0x41, 0xa1);
-
/* initialize adapter */
rc = ahci_host_init(probe_ent);
if (rc)
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a1e8ca758594..653818d2f802 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -7289,7 +7289,7 @@ ahd_reset_cmds_pending(struct ahd_softc *ahd)
ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
}
-void
+static void
ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
{
cam_status ostat;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index b244c7124179..998999c0a972 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -243,25 +243,6 @@ ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
static uint32_t aic79xx_no_reset;
/*
- * Certain PCI motherboards will scan PCI devices from highest to lowest,
- * others scan from lowest to highest, and they tend to do all kinds of
- * strange things when they come into contact with PCI bridge chips. The
- * net result of all this is that the PCI card that is actually used to boot
- * the machine is very hard to detect. Most motherboards go from lowest
- * PCI slot number to highest, and the first SCSI controller found is the
- * one you boot from. The only exceptions to this are when a controller
- * has its BIOS disabled. So, we by default sort all of our SCSI controllers
- * from lowest PCI slot number to highest PCI slot number. We also force
- * all controllers with their BIOS disabled to the end of the list. This
- * works on *almost* all computers. Where it doesn't work, we have this
- * option. Setting this option to non-0 will reverse the order of the sort
- * to highest first, then lowest, but will still leave cards with their BIOS
- * disabled at the very end. That should fix everyone up unless there are
- * really strange cirumstances.
- */
-static uint32_t aic79xx_reverse_scan;
-
-/*
* Should we force EXTENDED translation on a controller.
* 0 == Use whatever is in the SEEPROM or default to off
* 1 == Use whatever is in the SEEPROM or default to on
@@ -350,7 +331,6 @@ MODULE_PARM_DESC(aic79xx,
" periodically to prevent tag starvation.\n"
" This may be required by some older disk\n"
" or drives/RAID arrays.\n"
-" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
" tag_info:<tag_str> Set per-target tag depth\n"
" global_tag_depth:<int> Global tag depth for all targets on all buses\n"
" slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
@@ -1031,7 +1011,6 @@ aic79xx_setup(char *s)
#ifdef AHD_DEBUG
{ "debug", &ahd_debug },
#endif
- { "reverse_scan", &aic79xx_reverse_scan },
{ "periodic_otag", &aic79xx_periodic_otag },
{ "pci_parity", &aic79xx_pci_parity },
{ "seltime", &aic79xx_seltime },
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 9e871de23835..601340d84410 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -93,7 +93,6 @@
#endif
/********************************** Misc Macros *******************************/
-#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
#define powerof2(x) ((((x)-1)&(x))==0)
/************************* Forward Declarations *******************************/
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index debf3e2a0798..aa4be8a31415 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -353,7 +353,6 @@ MODULE_PARM_DESC(aic7xxx,
" periodically to prevent tag starvation.\n"
" This may be required by some older disk\n"
" drives or RAID arrays.\n"
-" reverse_scan Sort PCI devices highest Bus/Slot to lowest\n"
" tag_info:<tag_str> Set per-target tag depth\n"
" global_tag_depth:<int> Global tag depth for every target\n"
" on every bus\n"
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index 8c91fda6482c..b98c5c1056c3 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -14,6 +14,8 @@ LIBS= -ldb
clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG)
# Override default kernel CFLAGS. This is a userland app.
AICASM_CFLAGS:= -I/usr/include -I.
+LEX= flex
+YACC= bison
YFLAGS= -d
NOMAN= noman
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index 06d7601cdf56..d006a8cb4a74 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -69,6 +69,7 @@ comment "The following drivers are not fully supported"
config SCSI_CUMANA_1
tristate "CumanaSCSI I support (EXPERIMENTAL)"
depends on ARCH_ACORN && EXPERIMENTAL && SCSI
+ select SCSI_SPI_ATTRS
help
This enables support for the Cumana SCSI I card. If you have an
Acorn system with one of these, say Y. If unsure, say N.
@@ -76,6 +77,7 @@ config SCSI_CUMANA_1
config SCSI_ECOSCSI
tristate "EcoScsi support (EXPERIMENTAL)"
depends on ARCH_ACORN && EXPERIMENTAL && (ARCH_ARC || ARCH_A5K) && SCSI
+ select SCSI_SPI_ATTRS
help
This enables support for the EcoSCSI card -- a small card that sits
in the Econet socket. If you have an Acorn system with one of these,
@@ -84,6 +86,7 @@ config SCSI_ECOSCSI
config SCSI_OAK1
tristate "Oak SCSI support (EXPERIMENTAL)"
depends on ARCH_ACORN && EXPERIMENTAL && SCSI
+ select SCSI_SPI_ATTRS
help
This enables support for the Oak SCSI card. If you have an Acorn
system with one of these, say Y. If unsure, say N.
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
info->stats.aborts += 1;
printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no);
- __scsi_print_command(SCpnt->data_cmnd);
+ __scsi_print_command(SCpnt->cmnd);
print_debug_list();
fas216_dumpstate(info);
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index 6dd544a5eb56..8c2600ffc6af 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -74,7 +74,7 @@ static inline void init_SCp(Scsi_Cmnd *SCpnt)
unsigned long len = 0;
int buf;
- SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->buffer;
+ SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
SCpnt->SCp.ptr = (char *)
(page_address(SCpnt->SCp.buffer->page) +
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 94b1261a259d..2d20caf377f5 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -105,9 +105,6 @@ enum {
PIIX_FLAG_SCR = (1 << 26), /* SCR available */
PIIX_FLAG_AHCI = (1 << 27), /* AHCI possible */
PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
- PIIX_FLAG_COMBINED = (1 << 29), /* combined mode possible */
- /* ICH6/7 use different scheme for map value */
- PIIX_FLAG_COMBINED_ICH6 = PIIX_FLAG_COMBINED | (1 << 30),
/* combined mode. if set, PATA is channel 0.
* if clear, PATA is channel 1.
@@ -126,6 +123,7 @@ enum {
ich6_sata = 4,
ich6_sata_ahci = 5,
ich6m_sata_ahci = 6,
+ ich8_sata_ahci = 7,
/* constants for mapping table */
P0 = 0, /* port 0 */
@@ -141,11 +139,19 @@ enum {
struct piix_map_db {
const u32 mask;
+ const u16 port_enable;
+ const int present_shift;
const int map[][4];
};
+struct piix_host_priv {
+ const int *map;
+ const struct piix_map_db *map_db;
+};
+
static int piix_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent);
+static void piix_host_stop(struct ata_host_set *host_set);
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
static void piix_pata_error_handler(struct ata_port *ap);
@@ -186,11 +192,11 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* Enterprise Southbridge 2 (where's the datasheet?) */
{ 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
/* SATA Controller 1 IDE (ICH8, no datasheet yet) */
- { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
/* SATA Controller 2 IDE (ICH8, ditto) */
- { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata_ahci },
+ { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
/* Mobile SATA Controller IDE (ICH8M, ditto) */
- { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata_ahci },
+ { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_ahci },
{ } /* terminate list */
};
@@ -254,7 +260,7 @@ static const struct ata_port_operations piix_pata_ops = {
.port_start = ata_port_start,
.port_stop = ata_port_stop,
- .host_stop = ata_host_stop,
+ .host_stop = piix_host_stop,
};
static const struct ata_port_operations piix_sata_ops = {
@@ -284,11 +290,13 @@ static const struct ata_port_operations piix_sata_ops = {
.port_start = ata_port_start,
.port_stop = ata_port_stop,
- .host_stop = ata_host_stop,
+ .host_stop = piix_host_stop,
};
-static struct piix_map_db ich5_map_db = {
+static const struct piix_map_db ich5_map_db = {
.mask = 0x7,
+ .port_enable = 0x3,
+ .present_shift = 4,
.map = {
/* PM PS SM SS MAP */
{ P0, NA, P1, NA }, /* 000b */
@@ -302,8 +310,10 @@ static struct piix_map_db ich5_map_db = {
},
};
-static struct piix_map_db ich6_map_db = {
+static const struct piix_map_db ich6_map_db = {
.mask = 0x3,
+ .port_enable = 0xf,
+ .present_shift = 4,
.map = {
/* PM PS SM SS MAP */
{ P0, P2, P1, P3 }, /* 00b */
@@ -313,8 +323,10 @@ static struct piix_map_db ich6_map_db = {
},
};
-static struct piix_map_db ich6m_map_db = {
+static const struct piix_map_db ich6m_map_db = {
.mask = 0x3,
+ .port_enable = 0x5,
+ .present_shift = 4,
.map = {
/* PM PS SM SS MAP */
{ P0, P2, RV, RV }, /* 00b */
@@ -324,6 +336,28 @@ static struct piix_map_db ich6m_map_db = {
},
};
+static const struct piix_map_db ich8_map_db = {
+ .mask = 0x3,
+ .port_enable = 0x3,
+ .present_shift = 8,
+ .map = {
+ /* PM PS SM SS MAP */
+ { P0, NA, P1, NA }, /* 00b (hardwired) */
+ { RV, RV, RV, RV },
+ { RV, RV, RV, RV }, /* 10b (never) */
+ { RV, RV, RV, RV },
+ },
+};
+
+static const struct piix_map_db *piix_map_db_table[] = {
+ [ich5_sata] = &ich5_map_db,
+ [esb_sata] = &ich5_map_db,
+ [ich6_sata] = &ich6_map_db,
+ [ich6_sata_ahci] = &ich6_map_db,
+ [ich6m_sata_ahci] = &ich6m_map_db,
+ [ich8_sata_ahci] = &ich8_map_db,
+};
+
static struct ata_port_info piix_port_info[] = {
/* piix4_pata */
{
@@ -356,63 +390,70 @@ static struct ata_port_info piix_port_info[] = {
/* ich5_sata */
{
.sht = &piix_sht,
- .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
- PIIX_FLAG_CHECKINTR,
+ .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
+ PIIX_FLAG_IGNORE_PCS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &piix_sata_ops,
- .private_data = &ich5_map_db,
},
/* i6300esb_sata */
{
.sht = &piix_sht,
- .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED |
+ .host_flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &piix_sata_ops,
- .private_data = &ich5_map_db,
},
/* ich6_sata */
{
.sht = &piix_sht,
- .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+ .host_flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &piix_sata_ops,
- .private_data = &ich6_map_db,
},
/* ich6_sata_ahci */
{
.sht = &piix_sht,
- .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+ .host_flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &piix_sata_ops,
- .private_data = &ich6_map_db,
},
/* ich6m_sata_ahci */
{
.sht = &piix_sht,
- .host_flags = ATA_FLAG_SATA | PIIX_FLAG_COMBINED_ICH6 |
+ .host_flags = ATA_FLAG_SATA |
+ PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
+ PIIX_FLAG_AHCI,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x7f, /* udma0-6 */
+ .port_ops = &piix_sata_ops,
+ },
+
+ /* ich8_sata_ahci */
+ {
+ .sht = &piix_sht,
+ .host_flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &piix_sata_ops,
- .private_data = &ich6m_map_db,
},
};
@@ -427,6 +468,11 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
MODULE_VERSION(DRV_VERSION);
+static int force_pcs = 0;
+module_param(force_pcs, int, 0444);
+MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
+ "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
+
/**
* piix_pata_cbl_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
@@ -491,74 +537,83 @@ static void piix_pata_error_handler(struct ata_port *ap)
}
/**
- * piix_sata_prereset - prereset for SATA host controller
+ * piix_sata_present_mask - determine present mask for SATA host controller
* @ap: Target port
*
- * Reads and configures SATA PCI device's PCI config register
- * Port Configuration and Status (PCS) to determine port and
- * device availability. Return -ENODEV to skip reset if no
- * device is present.
+ * Reads SATA PCI device's PCI config register Port Configuration
+ * and Status (PCS) to determine port and device availability.
*
* LOCKING:
* None (inherited from caller).
*
* RETURNS:
- * 0 if device is present, -ENODEV otherwise.
+ * determined present_mask
*/
-static int piix_sata_prereset(struct ata_port *ap)
+static unsigned int piix_sata_present_mask(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
- const unsigned int *map = ap->host_set->private_data;
+ struct piix_host_priv *hpriv = ap->host_set->private_data;
+ const unsigned int *map = hpriv->map;
int base = 2 * ap->hard_port_no;
unsigned int present_mask = 0;
int port, i;
- u8 pcs;
+ u16 pcs;
- pci_read_config_byte(pdev, ICH5_PCS, &pcs);
+ pci_read_config_word(pdev, ICH5_PCS, &pcs);
DPRINTK("ata%u: ENTER, pcs=0x%x base=%d\n", ap->id, pcs, base);
- /* enable all ports on this ap and wait for them to settle */
- for (i = 0; i < 2; i++) {
- port = map[base + i];
- if (port >= 0)
- pcs |= 1 << port;
- }
-
- pci_write_config_byte(pdev, ICH5_PCS, pcs);
- msleep(100);
-
- /* let's see which devices are present */
- pci_read_config_byte(pdev, ICH5_PCS, &pcs);
-
for (i = 0; i < 2; i++) {
port = map[base + i];
if (port < 0)
continue;
- if (ap->flags & PIIX_FLAG_IGNORE_PCS || pcs & 1 << (4 + port))
+ if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
+ (pcs & 1 << (hpriv->map_db->present_shift + port)))
present_mask |= 1 << i;
- else
- pcs &= ~(1 << port);
}
- /* disable offline ports on non-AHCI controllers */
- if (!(ap->flags & PIIX_FLAG_AHCI))
- pci_write_config_byte(pdev, ICH5_PCS, pcs);
-
DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
ap->id, pcs, present_mask);
- if (!present_mask) {
- ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n");
- ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
- return 0;
+ return present_mask;
+}
+
+/**
+ * piix_sata_softreset - reset SATA host port via ATA SRST
+ * @ap: port to reset
+ * @classes: resulting classes of attached devices
+ *
+ * Reset SATA host port via ATA SRST. On controllers with
+ * reliable PCS present bits, the bits are used to determine
+ * device presence.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
+{
+ unsigned int present_mask;
+ int i, rc;
+
+ present_mask = piix_sata_present_mask(ap);
+
+ rc = ata_std_softreset(ap, classes);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ if (!(present_mask & (1 << i)))
+ classes[i] = ATA_DEV_NONE;
}
- return ata_std_prereset(ap);
+ return 0;
}
static void piix_sata_error_handler(struct ata_port *ap)
{
- ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL,
+ ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
ata_std_postreset);
}
@@ -761,10 +816,40 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
return no_piix_dma;
}
+static void __devinit piix_init_pcs(struct pci_dev *pdev,
+ struct ata_port_info *pinfo,
+ const struct piix_map_db *map_db)
+{
+ u16 pcs, new_pcs;
+
+ pci_read_config_word(pdev, ICH5_PCS, &pcs);
+
+ new_pcs = pcs | map_db->port_enable;
+
+ if (new_pcs != pcs) {
+ DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
+ pci_write_config_word(pdev, ICH5_PCS, new_pcs);
+ msleep(150);
+ }
+
+ if (force_pcs == 1) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "force ignoring PCS (0x%x)\n", new_pcs);
+ pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
+ pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
+ } else if (force_pcs == 2) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "force honoring PCS (0x%x)\n", new_pcs);
+ pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
+ pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
+ }
+}
+
static void __devinit piix_init_sata_map(struct pci_dev *pdev,
- struct ata_port_info *pinfo)
+ struct ata_port_info *pinfo,
+ const struct piix_map_db *map_db)
{
- struct piix_map_db *map_db = pinfo[0].private_data;
+ struct piix_host_priv *hpriv = pinfo[0].private_data;
const unsigned int *map;
int i, invalid_map = 0;
u8 map_value;
@@ -788,6 +873,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
case IDE:
WARN_ON((i & 1) || map[i + 1] != IDE);
pinfo[i / 2] = piix_port_info[ich5_pata];
+ pinfo[i / 2].private_data = hpriv;
i++;
printk(" IDE IDE");
break;
@@ -805,8 +891,8 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
dev_printk(KERN_ERR, &pdev->dev,
"invalid MAP value %u\n", map_value);
- pinfo[0].private_data = (void *)map;
- pinfo[1].private_data = (void *)map;
+ hpriv->map = map;
+ hpriv->map_db = map_db;
}
/**
@@ -829,6 +915,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static int printed_version;
struct ata_port_info port_info[2];
struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
+ struct piix_host_priv *hpriv;
unsigned long host_flags;
if (!printed_version++)
@@ -839,8 +926,14 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (!in_module_init)
return -ENODEV;
+ hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+
port_info[0] = piix_port_info[ent->driver_data];
port_info[1] = piix_port_info[ent->driver_data];
+ port_info[0].private_data = hpriv;
+ port_info[1].private_data = hpriv;
host_flags = port_info[0].host_flags;
@@ -855,8 +948,12 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Initialize SATA map */
- if (host_flags & ATA_FLAG_SATA)
- piix_init_sata_map(pdev, port_info);
+ if (host_flags & ATA_FLAG_SATA) {
+ piix_init_sata_map(pdev, port_info,
+ piix_map_db_table[ent->driver_data]);
+ piix_init_pcs(pdev, port_info,
+ piix_map_db_table[ent->driver_data]);
+ }
/* On ICH5, some BIOSen disable the interrupt using the
* PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
@@ -879,6 +976,13 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return ata_pci_init_one(pdev, ppinfo, 2);
}
+static void piix_host_stop(struct ata_host_set *host_set)
+{
+ if (host_set->next == NULL)
+ kfree(host_set->private_data);
+ ata_host_stop(host_set);
+}
+
static int __init piix_init(void)
{
int rc;
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 007a14e5c3fd..e397129c90d1 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -507,7 +507,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
*/
if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+ cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+
cmd->SCp.buffer->offset;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index dddd2acce76f..61f6024b61ba 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -5,6 +5,7 @@
* Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
* by D. Gilbert and aeb (20020609)
* Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
+ * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
*/
#include <linux/blkdev.h>
@@ -36,55 +37,56 @@ static const char * cdb_byte0_names[] = {
/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL,
"Reasssign Blocks",
-/* 08-0d */ "Read (6)", NULL, "Write (6)", "Seek (6)", NULL, NULL,
+/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL,
/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
-/* 13-16 */ "Verify (6)", "Recover Buffered Data", "Mode Select (6)",
- "Reserve (6)",
-/* 17-1a */ "Release (6)", "Copy", "Erase", "Mode Sense (6)",
+/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)",
+ "Reserve(6)",
+/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)",
/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic",
/* 1e-1f */ "Prevent/Allow Medium Removal", NULL,
/* 20-22 */ NULL, NULL, NULL,
/* 23-28 */ "Read Format Capacities", "Set Window",
- "Read Capacity (10)", NULL, NULL, "Read (10)",
-/* 29-2d */ "Read Generation", "Write (10)", "Seek (10)", "Erase (10)",
- "Read updated block",
-/* 2e-31 */ "Write Verify (10)", "Verify (10)", "Search High", "Search Equal",
+ "Read Capacity(10)", NULL, NULL, "Read(10)",
+/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)",
+ "Read updated block",
+/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal",
/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position",
-/* 35-37 */ "Synchronize Cache (10)", "Lock/Unlock Cache (10)",
+/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)",
"Read Defect Data(10)",
/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer",
"Read Buffer",
-/* 3d-3f */ "Update Block", "Read Long (10)", "Write Long (10)",
-/* 40-41 */ "Change Definition", "Write Same (10)",
+/* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)",
+/* 40-41 */ "Change Definition", "Write Same(10)",
/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
- "Play audio (10)", "Get configuration", "Play audio msf",
+ "Play audio(10)", "Get configuration", "Play audio msf",
"Play audio track/index",
-/* 49-4f */ "Play track relative (10)", "Get event status notification",
+/* 49-4f */ "Play track relative(10)", "Get event status notification",
"Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
NULL,
/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
- "Reserve track", "Send OPC info", "Mode Select (10)",
-/* 56-5b */ "Reserve (10)", "Release (10)", "Repair track", "Read master cue",
- "Mode Sense (10)", "Close track/session",
+ "Reserve track", "Send OPC info", "Mode Select(10)",
+/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
+ "Mode Sense(10)", "Close track/session",
/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
"Persistent reserve out",
/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
-/* 80-84 */ "Xdwrite (16)", "Rebuild (16)", "Regenerate (16)", "Extended copy",
+/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
"Receive copy results",
-/* 85-89 */ "Memory Export In (16)", "Access control in", "Access control out",
- "Read (16)", "Memory Export Out (16)",
-/* 8a-8f */ "Write (16)", NULL, "Read attributes", "Write attributes",
- "Write and verify (16)", "Verify (16)",
-/* 90-94 */ "Pre-fetch (16)", "Synchronize cache (16)",
- "Lock/unlock cache (16)", "Write same (16)", NULL,
+/* 85-89 */ "ATA command pass through(16)", "Access control in",
+ "Access control out", "Read(16)", "Memory Export Out(16)",
+/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
+ "Write and verify(16)", "Verify(16)",
+/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
+ "Lock/unlock cache(16)", "Write same(16)", NULL,
/* 95-99 */ NULL, NULL, NULL, NULL, NULL,
-/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in (16)",
- "Service action out (16)",
-/* a0-a5 */ "Report luns", "Blank", "Send event", "Maintenance in",
- "Maintenance out", "Move medium/play audio(12)",
+/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)",
+ "Service action out(16)",
+/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank",
+ "Security protocol in", "Maintenance in", "Maintenance out",
+ "Move medium/play audio(12)",
/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
"Play track relative(12)",
/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance",
@@ -92,12 +94,12 @@ static const char * cdb_byte0_names[] = {
/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data equal(12)",
/* b2-b4 */ "Search data low(12)", "Set limits(12)",
"Read element status attached",
-/* b5-b6 */ "Request volume element address", "Send volume tag, set streaming",
+/* b5-b6 */ "Security protocol out", "Send volume tag, set streaming",
/* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf",
/* ba-bc */ "Redundancy group (in), Scan",
- "Redundancy group (out), Set cd-rom speed", "Spare in, Play cd",
-/* bd-bf */ "Spare out, Mechanism status", "Volume set in, Read cd",
- "Volume set out, Send DVD structure",
+ "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd",
+/* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd",
+ "Volume set (out), Send DVD structure",
};
struct value_name_pair {
@@ -112,6 +114,7 @@ static const struct value_name_pair maint_in_arr[] = {
{0xc, "Report supported operation codes"},
{0xd, "Report supported task management functions"},
{0xe, "Report priority"},
+ {0xf, "Report timestamp"},
};
#define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
@@ -120,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = {
{0xa, "Set target port groups"},
{0xb, "Change aliases"},
{0xe, "Set priority"},
+ {0xe, "Set timestamp"},
};
#define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
@@ -427,6 +431,7 @@ static struct error_info additional[] =
{0x001A, "Rewind operation in progress"},
{0x001B, "Set capacity operation in progress"},
{0x001C, "Verify operation in progress"},
+ {0x001D, "ATA pass through information available"},
{0x0100, "No index/sector signal"},
@@ -438,7 +443,7 @@ static struct error_info additional[] =
{0x0400, "Logical unit not ready, cause not reportable"},
{0x0401, "Logical unit is in process of becoming ready"},
- {0x0402, "Logical unit not ready, initializing cmd. required"},
+ {0x0402, "Logical unit not ready, initializing command required"},
{0x0403, "Logical unit not ready, manual intervention required"},
{0x0404, "Logical unit not ready, format in progress"},
{0x0405, "Logical unit not ready, rebuild in progress"},
@@ -478,6 +483,9 @@ static struct error_info additional[] =
{0x0B00, "Warning"},
{0x0B01, "Warning - specified temperature exceeded"},
{0x0B02, "Warning - enclosure degraded"},
+ {0x0B03, "Warning - background self-test failed"},
+ {0x0B04, "Warning - background pre-scan detected medium error"},
+ {0x0B05, "Warning - background medium scan detected medium error"},
{0x0C00, "Write error"},
{0x0C01, "Write error - recovered with auto reallocation"},
@@ -493,6 +501,7 @@ static struct error_info additional[] =
{0x0C0B, "Auxiliary memory write error"},
{0x0C0C, "Write error - unexpected unsolicited data"},
{0x0C0D, "Write error - not enough unsolicited data"},
+ {0x0C0F, "Defects in error window"},
{0x0D00, "Error detected by third party temporary initiator"},
{0x0D01, "Third party device failure"},
@@ -504,11 +513,12 @@ static struct error_info additional[] =
{0x0E00, "Invalid information unit"},
{0x0E01, "Information unit too short"},
{0x0E02, "Information unit too long"},
+ {0x0E03, "Invalid field in command information unit"},
{0x1000, "Id CRC or ECC error"},
- {0x1001, "Data block guard check failed"},
- {0x1002, "Data block application tag check failed"},
- {0x1003, "Data block reference tag check failed"},
+ {0x1001, "Logical block guard check failed"},
+ {0x1002, "Logical block application tag check failed"},
+ {0x1003, "Logical block reference tag check failed"},
{0x1100, "Unrecovered read error"},
{0x1101, "Read retries exhausted"},
@@ -530,6 +540,7 @@ static struct error_info additional[] =
{0x1111, "Read error - loss of streaming"},
{0x1112, "Auxiliary memory read error"},
{0x1113, "Read error - failed retransmission request"},
+ {0x1114, "Read error - lba marked bad by application client"},
{0x1200, "Address mark not found for id field"},
@@ -610,11 +621,14 @@ static struct error_info additional[] =
{0x2100, "Logical block address out of range"},
{0x2101, "Invalid element address"},
{0x2102, "Invalid address for write"},
+ {0x2103, "Invalid write crossing layer jump"},
{0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
{0x2400, "Invalid field in cdb"},
{0x2401, "CDB decryption error"},
+ {0x2402, "Obsolete"},
+ {0x2403, "Obsolete"},
{0x2404, "Security audit value frozen"},
{0x2405, "Security working key frozen"},
{0x2406, "Nonce not unique"},
@@ -637,7 +651,10 @@ static struct error_info additional[] =
{0x260C, "Invalid operation for copy source or destination"},
{0x260D, "Copy segment granularity violation"},
{0x260E, "Invalid parameter while port is enabled"},
- {0x260F, "Invalid data-out buffer integrity"},
+ {0x260F, "Invalid data-out buffer integrity check value"},
+ {0x2610, "Data decryption key fail limit reached"},
+ {0x2611, "Incomplete key-associated data set"},
+ {0x2612, "Vendor specific key reference not found"},
{0x2700, "Write protected"},
{0x2701, "Hardware write protected"},
@@ -649,6 +666,7 @@ static struct error_info additional[] =
{0x2800, "Not ready to ready change, medium may have changed"},
{0x2801, "Import or export element accessed"},
+ {0x2802, "Format-layer may have changed"},
{0x2900, "Power on, reset, or bus device reset occurred"},
{0x2901, "Power on occurred"},
@@ -669,6 +687,11 @@ static struct error_info additional[] =
{0x2A07, "Implicit asymmetric access state transition failed"},
{0x2A08, "Priority changed"},
{0x2A09, "Capacity data has changed"},
+ {0x2A10, "Timestamp changed"},
+ {0x2A11, "Data encryption parameters changed by another i_t nexus"},
+ {0x2A12, "Data encryption parameters changed by vendor specific "
+ "event"},
+ {0x2A13, "Data encryption key instance counter has changed"},
{0x2B00, "Copy cannot execute since host cannot disconnect"},
@@ -690,6 +713,7 @@ static struct error_info additional[] =
{0x2E00, "Insufficient time for operation"},
{0x2F00, "Commands cleared by another initiator"},
+ {0x2F01, "Commands cleared by power loss notification"},
{0x3000, "Incompatible medium installed"},
{0x3001, "Cannot read medium - unknown format"},
@@ -702,7 +726,8 @@ static struct error_info additional[] =
{0x3008, "Cannot write - application code mismatch"},
{0x3009, "Current session not fixated for append"},
{0x300A, "Cleaning request rejected"},
- {0x300C, "WORM medium, overwrite attempted"},
+ {0x300C, "WORM medium - overwrite attempted"},
+ {0x300D, "WORM medium - integrity check"},
{0x3010, "Medium not formatted"},
{0x3100, "Medium format corrupted"},
@@ -790,6 +815,9 @@ static struct error_info additional[] =
{0x3F0F, "Echo buffer overwritten"},
{0x3F10, "Medium loadable"},
{0x3F11, "Medium auxiliary memory accessible"},
+ {0x3F12, "iSCSI IP address added"},
+ {0x3F13, "iSCSI IP address removed"},
+ {0x3F14, "iSCSI IP address changed"},
/*
* {0x40NN, "Ram failure"},
* {0x40NN, "Diagnostic failure on component nn"},
@@ -799,6 +827,7 @@ static struct error_info additional[] =
{0x4300, "Message error"},
{0x4400, "Internal target failure"},
+ {0x4471, "ATA device failed set features"},
{0x4500, "Select or reselect failure"},
@@ -807,9 +836,10 @@ static struct error_info additional[] =
{0x4700, "Scsi parity error"},
{0x4701, "Data phase CRC error detected"},
{0x4702, "Scsi parity error detected during st data phase"},
- {0x4703, "Information unit CRC error detected"},
+ {0x4703, "Information unit iuCRC error detected"},
{0x4704, "Asynchronous information protection error detected"},
{0x4705, "Protocol service CRC error"},
+ {0x4706, "Phy test function in progress"},
{0x477f, "Some commands cleared by iSCSI Protocol event"},
{0x4800, "Initiator detected error message received"},
@@ -844,6 +874,8 @@ static struct error_info additional[] =
{0x5300, "Media load or eject failed"},
{0x5301, "Unload tape failure"},
{0x5302, "Medium removal prevented"},
+ {0x5303, "Medium removal prevented by data transfer element"},
+ {0x5304, "Medium thread or unthread failure"},
{0x5400, "Scsi to host system interface failure"},
@@ -855,6 +887,7 @@ static struct error_info additional[] =
{0x5505, "Insufficient access control resources"},
{0x5506, "Auxiliary memory out of space"},
{0x5507, "Quota error"},
+ {0x5508, "Maximum number of supplemental decryption keys exceeded"},
{0x5700, "Unable to recover table-of-contents"},
@@ -1004,6 +1037,7 @@ static struct error_info additional[] =
{0x6708, "Assign failure occurred"},
{0x6709, "Multiply assigned logical unit"},
{0x670A, "Set target port groups command failed"},
+ {0x670B, "ATA device feature not enabled"},
{0x6800, "Logical unit not configured"},
@@ -1030,6 +1064,8 @@ static struct error_info additional[] =
{0x6F03, "Read of scrambled sector without authentication"},
{0x6F04, "Media region code is mismatched to logical unit region"},
{0x6F05, "Drive region must be permanent/region reset count error"},
+ {0x6F06, "Insufficient block count for binding nonce recording"},
+ {0x6F07, "Conflict in binding nonce recording"},
/*
* {0x70NN, "Decompression exception short algorithm id of nn"},
*/
@@ -1041,6 +1077,8 @@ static struct error_info additional[] =
{0x7203, "Session fixation error - incomplete track in session"},
{0x7204, "Empty or partially written reserved track"},
{0x7205, "No more track reservations allowed"},
+ {0x7206, "RMZ extension is not allowed"},
+ {0x7207, "No more test zone extensions are allowed"},
{0x7300, "Cd control error"},
{0x7301, "Power calibration area almost full"},
@@ -1049,6 +1087,18 @@ static struct error_info additional[] =
{0x7304, "Program memory area update failure"},
{0x7305, "Program memory area is full"},
{0x7306, "RMA/PMA is almost full"},
+ {0x7310, "Current power calibration area almost full"},
+ {0x7311, "Current power calibration area is full"},
+ {0x7317, "RDZ is full"},
+
+ {0x7400, "Security error"},
+ {0x7401, "Unable to decrypt data"},
+ {0x7402, "Unencrypted data encountered while decrypting"},
+ {0x7403, "Incorrect data encryption key"},
+ {0x7404, "Cryptographic integrity validation failed"},
+ {0x7405, "Error decrypting data"},
+ {0x7471, "Logical unit access not authorized"},
+
{0, NULL}
};
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 10573c24a50b..5630868c1b25 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev;
static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
{
if (sun4_esp_physaddr) {
- memset(&sun4_esp_dev, 0, sizeof(esp_dev));
+ memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
sun4_esp_dev.irqs[0] = 4;
sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
@@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
static int __devexit esp_sun4_remove(void)
{
+ struct of_device *dev = &sun4_esp_dev.ofdev;
struct esp *esp = dev_get_drvdata(&dev->dev);
return esp_remove_common(esp);
@@ -1397,7 +1398,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
sp->SCp.ptr = NULL;
}
} else {
- sp->SCp.buffer = (struct scatterlist *) sp->buffer;
+ sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
sp->SCp.buffers_residual = sbus_map_sg(esp->sdev,
sp->SCp.buffer,
sp->use_sg,
@@ -1410,7 +1411,7 @@ static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp)
{
if (sp->use_sg) {
- sbus_unmap_sg(esp->sdev, sp->buffer, sp->use_sg,
+ sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg,
sp->sc_data_direction);
} else if (sp->request_bufflen) {
sbus_unmap_single(esp->sdev,
@@ -2754,18 +2755,15 @@ static int esp_do_data_finale(struct esp *esp)
*/
static int esp_should_clear_sync(struct scsi_cmnd *sp)
{
- u8 cmd1 = sp->cmnd[0];
- u8 cmd2 = sp->data_cmnd[0];
+ u8 cmd = sp->cmnd[0];
/* These cases are for spinning up a disk and
* waiting for that spinup to complete.
*/
- if (cmd1 == START_STOP ||
- cmd2 == START_STOP)
+ if (cmd == START_STOP)
return 0;
- if (cmd1 == TEST_UNIT_READY ||
- cmd2 == TEST_UNIT_READY)
+ if (cmd == TEST_UNIT_READY)
return 0;
/* One more special case for SCSI tape drives,
@@ -2773,8 +2771,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp)
* completion of a rewind or tape load operation.
*/
if (sp->device->type == TYPE_TAPE) {
- if (cmd1 == MODE_SENSE ||
- cmd2 == MODE_SENSE)
+ if (cmd == MODE_SENSE)
return 0;
}
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index ab2f8b267908..bcb3444f1dcf 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.0 (060426)";
-static DEFINE_SPINLOCK(hptiop_hba_list_lock);
-static LIST_HEAD(hptiop_hba_list);
-static int hptiop_cdev_major = -1;
-
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
if (atomic_xchg(&hba->resetting, 1) == 0) {
atomic_inc(&hba->reset_count);
writel(IOPMU_INBOUND_MSG0_RESET,
- &hba->iop->outbound_msgaddr0);
+ &hba->iop->inbound_msgaddr0);
hptiop_pci_posting_flush(hba->iop);
}
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
return queue_depth;
}
-struct hptiop_getinfo {
- char __user *buffer;
- loff_t buflength;
- loff_t bufoffset;
- loff_t buffillen;
- loff_t filpos;
-};
-
-static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
- char *data, int datalen)
-{
- if (pinfo->filpos < pinfo->bufoffset) {
- if (pinfo->filpos + datalen <= pinfo->bufoffset) {
- pinfo->filpos += datalen;
- return;
- } else {
- data += (pinfo->bufoffset - pinfo->filpos);
- datalen -= (pinfo->bufoffset - pinfo->filpos);
- pinfo->filpos = pinfo->bufoffset;
- }
- }
-
- pinfo->filpos += datalen;
- if (pinfo->buffillen == pinfo->buflength)
- return;
-
- if (pinfo->buflength - pinfo->buffillen < datalen)
- datalen = pinfo->buflength - pinfo->buffillen;
-
- if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
- return;
-
- pinfo->buffillen += datalen;
-}
-
-static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
-{
- va_list args;
- char buf[128];
- int len;
-
- va_start(args, fmt);
- len = vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
- hptiop_copy_mem_info(pinfo, buf, len);
- return len;
-}
-
-static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
-{
- arg->done = NULL;
- wake_up(&arg->hba->ioctl_wq);
-}
-
-static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
-{
- struct hptiop_hba *hba = arg->hba;
- u32 val;
- struct hpt_iop_request_ioctl_command __iomem *req;
- int ioctl_retry = 0;
-
- dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
-
- /*
- * check (in + out) buff size from application.
- * outbuf must be dword aligned.
- */
- if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
- hba->max_request_size
- - sizeof(struct hpt_iop_request_header)
- - 4 * sizeof(u32)) {
- dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
- hba->host->host_no,
- arg->inbuf_size, arg->outbuf_size);
- arg->result = HPT_IOCTL_RESULT_FAILED;
- return;
- }
-
-retry:
- spin_lock_irq(hba->host->host_lock);
-
- val = readl(&hba->iop->inbound_queue);
- if (val == IOPMU_QUEUE_EMPTY) {
- spin_unlock_irq(hba->host->host_lock);
- dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
- arg->result = -1;
- return;
- }
-
- req = (struct hpt_iop_request_ioctl_command __iomem *)
- ((unsigned long)hba->iop + val);
-
- writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
- &req->ioctl_code);
- writel(arg->inbuf_size, &req->inbuf_size);
- writel(arg->outbuf_size, &req->outbuf_size);
-
- /*
- * use the buffer on the IOP local memory first, then copy it
- * back to host.
- * the caller's request buffer shoudl be little-endian.
- */
- if (arg->inbuf_size)
- memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
-
- /* correct the controller ID for IOP */
- if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
- arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
- arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
- && arg->inbuf_size >= sizeof(u32))
- writel(0, req->buf);
-
- writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
- writel(0, &req->header.flags);
- writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
- + arg->inbuf_size, &req->header.size);
- writel((u32)(unsigned long)arg, &req->header.context);
- writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
- &req->header.context_hi32);
- writel(IOP_RESULT_PENDING, &req->header.result);
-
- arg->result = HPT_IOCTL_RESULT_FAILED;
- arg->done = hptiop_ioctl_done;
-
- writel(val, &hba->iop->inbound_queue);
- hptiop_pci_posting_flush(hba->iop);
-
- spin_unlock_irq(hba->host->host_lock);
-
- wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
-
- if (arg->done != NULL) {
- hptiop_reset_hba(hba);
- if (ioctl_retry++ < 3)
- goto retry;
- }
-
- dprintk("hpt_iop_ioctl %x result %d\n",
- arg->ioctl_code, arg->result);
-}
-
-static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
- u32 insize, void *outbuf, u32 outsize)
-{
- struct hpt_ioctl_k arg;
- arg.hba = hba;
- arg.ioctl_code = code;
- arg.inbuf = inbuf;
- arg.outbuf = outbuf;
- arg.inbuf_size = insize;
- arg.outbuf_size = outsize;
- arg.bytes_returned = NULL;
- hptiop_do_ioctl(&arg);
- return arg.result;
-}
-
-static inline int hpt_id_valid(__le32 id)
-{
- return id != 0 && id != cpu_to_le32(0xffffffff);
-}
-
-static int hptiop_get_controller_info(struct hptiop_hba *hba,
- struct hpt_controller_info *pinfo)
-{
- int id = 0;
-
- return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
- &id, sizeof(int), pinfo, sizeof(*pinfo));
-}
-
-
-static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
- struct hpt_channel_info *pinfo)
-{
- u32 ids[2];
-
- ids[0] = 0;
- ids[1] = bus;
- return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
- ids, sizeof(ids), pinfo, sizeof(*pinfo));
-
-}
-
-static int hptiop_get_logical_devices(struct hptiop_hba *hba,
- __le32 *pids, int maxcount)
-{
- int i;
- u32 count = maxcount - 1;
-
- if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
- &count, sizeof(u32),
- pids, sizeof(u32) * maxcount))
- return -1;
-
- maxcount = le32_to_cpu(pids[0]);
- for (i = 0; i < maxcount; i++)
- pids[i] = pids[i+1];
-
- return maxcount;
-}
-
-static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
- struct hpt_logical_device_info_v3 *pinfo)
-{
- return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
- &id, sizeof(u32),
- pinfo, sizeof(*pinfo));
-}
-
-static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
-{
- static char s[64];
- u32 flags = le32_to_cpu(devinfo->u.array.flags);
- u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
- u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
-
- if (flags & ARRAY_FLAG_DISABLED)
- return "Disabled";
- else if (flags & ARRAY_FLAG_TRANSFORMING)
- sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
- trans_prog / 100,
- trans_prog % 100,
- (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
- ", Critical" : "",
- ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
- !(flags & ARRAY_FLAG_REBUILDING) &&
- !(flags & ARRAY_FLAG_INITIALIZING))?
- ", Unintialized" : "");
- else if ((flags & ARRAY_FLAG_BROKEN) &&
- devinfo->u.array.array_type != AT_RAID6)
- return "Critical";
- else if (flags & ARRAY_FLAG_REBUILDING)
- sprintf(s,
- (flags & ARRAY_FLAG_NEEDINITIALIZING)?
- "%sBackground initializing %d.%d%%" :
- "%sRebuilding %d.%d%%",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- reb_prog / 100,
- reb_prog % 100);
- else if (flags & ARRAY_FLAG_VERIFYING)
- sprintf(s, "%sVerifying %d.%d%%",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- reb_prog / 100,
- reb_prog % 100);
- else if (flags & ARRAY_FLAG_INITIALIZING)
- sprintf(s, "%sForground initializing %d.%d%%",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- reb_prog / 100,
- reb_prog % 100);
- else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
- sprintf(s,"%s%s%s", "Need Expanding/Migrating",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
- !(flags & ARRAY_FLAG_REBUILDING) &&
- !(flags & ARRAY_FLAG_INITIALIZING))?
- ", Unintialized" : "");
- else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
- !(flags & ARRAY_FLAG_REBUILDING) &&
- !(flags & ARRAY_FLAG_INITIALIZING))
- sprintf(s,"%sUninitialized",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
- else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
- (flags & ARRAY_FLAG_BROKEN))
- return "Critical";
- else
- return "Normal";
- return s;
-}
-
-static void hptiop_dump_devinfo(struct hptiop_hba *hba,
- struct hptiop_getinfo *pinfo, __le32 id, int indent)
-{
- struct hpt_logical_device_info_v3 devinfo;
- int i;
- u64 capacity;
-
- for (i = 0; i < indent; i++)
- hptiop_copy_info(pinfo, "\t");
-
- if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
- hptiop_copy_info(pinfo, "unknown\n");
- return;
- }
-
- switch (devinfo.type) {
-
- case LDT_DEVICE: {
- struct hd_driveid *driveid;
- u32 flags = le32_to_cpu(devinfo.u.device.flags);
-
- driveid = (struct hd_driveid *)devinfo.u.device.ident;
- /* model[] is 40 chars long, but we just want 20 chars here */
- driveid->model[20] = 0;
-
- if (indent)
- if (flags & DEVICE_FLAG_DISABLED)
- hptiop_copy_info(pinfo,"Missing\n");
- else
- hptiop_copy_info(pinfo, "CH%d %s\n",
- devinfo.u.device.path_id + 1,
- driveid->model);
- else {
- capacity = le64_to_cpu(devinfo.capacity) * 512;
- do_div(capacity, 1000000);
- hptiop_copy_info(pinfo,
- "CH%d %s, %lluMB, %s %s%s%s%s\n",
- devinfo.u.device.path_id + 1,
- driveid->model,
- capacity,
- (flags & DEVICE_FLAG_DISABLED)?
- "Disabled" : "Normal",
- devinfo.u.device.read_ahead_enabled?
- "[RA]" : "",
- devinfo.u.device.write_cache_enabled?
- "[WC]" : "",
- devinfo.u.device.TCQ_enabled?
- "[TCQ]" : "",
- devinfo.u.device.NCQ_enabled?
- "[NCQ]" : ""
- );
- }
- break;
- }
-
- case LDT_ARRAY:
- if (devinfo.target_id != INVALID_TARGET_ID)
- hptiop_copy_info(pinfo, "[DISK %d_%d] ",
- devinfo.vbus_id, devinfo.target_id);
-
- capacity = le64_to_cpu(devinfo.capacity) * 512;
- do_div(capacity, 1000000);
- hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
- devinfo.u.array.name,
- devinfo.u.array.array_type==AT_RAID0? "RAID0" :
- devinfo.u.array.array_type==AT_RAID1? "RAID1" :
- devinfo.u.array.array_type==AT_RAID5? "RAID5" :
- devinfo.u.array.array_type==AT_RAID6? "RAID6" :
- devinfo.u.array.array_type==AT_JBOD? "JBOD" :
- "unknown",
- capacity,
- get_array_status(&devinfo));
- for (i = 0; i < devinfo.u.array.ndisk; i++) {
- if (hpt_id_valid(devinfo.u.array.members[i])) {
- if (cpu_to_le16(1<<i) &
- devinfo.u.array.critical_members)
- hptiop_copy_info(pinfo, "\t*");
- hptiop_dump_devinfo(hba, pinfo,
- devinfo.u.array.members[i], indent+1);
- }
- else
- hptiop_copy_info(pinfo, "\tMissing\n");
- }
- if (id == devinfo.u.array.transform_source) {
- hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
- hptiop_dump_devinfo(hba, pinfo,
- devinfo.u.array.transform_target, indent+1);
- }
- break;
- }
-}
-
static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}
-static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct hptiop_hba *hba = filp->private_data;
- struct hptiop_getinfo info;
- int i, j, ndev;
- struct hpt_controller_info con_info;
- struct hpt_channel_info chan_info;
- __le32 ids[32];
-
- info.buffer = buf;
- info.buflength = count;
- info.bufoffset = ppos ? *ppos : 0;
- info.filpos = 0;
- info.buffillen = 0;
-
- if (hptiop_get_controller_info(hba, &con_info))
- return -EIO;
-
- for (i = 0; i < con_info.num_buses; i++) {
- if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
- if (hpt_id_valid(chan_info.devices[0]))
- hptiop_dump_devinfo(hba, &info,
- chan_info.devices[0], 0);
- if (hpt_id_valid(chan_info.devices[1]))
- hptiop_dump_devinfo(hba, &info,
- chan_info.devices[1], 0);
- }
- }
-
- ndev = hptiop_get_logical_devices(hba, ids,
- sizeof(ids) / sizeof(ids[0]));
-
- /*
- * if hptiop_get_logical_devices fails, ndev==-1 and it just
- * output nothing here
- */
- for (j = 0; j < ndev; j++)
- hptiop_dump_devinfo(hba, &info, ids[j], 0);
-
- if (ppos)
- *ppos += info.buffillen;
-
- return info.buffillen;
-}
-
-static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct hptiop_hba *hba = file->private_data;
- struct hpt_ioctl_u ioctl_u;
- struct hpt_ioctl_k ioctl_k;
- u32 bytes_returned;
- int err = -EINVAL;
-
- if (copy_from_user(&ioctl_u,
- (void __user *)arg, sizeof(struct hpt_ioctl_u)))
- return -EINVAL;
-
- if (ioctl_u.magic != HPT_IOCTL_MAGIC)
- return -EINVAL;
-
- ioctl_k.ioctl_code = ioctl_u.ioctl_code;
- ioctl_k.inbuf = NULL;
- ioctl_k.inbuf_size = ioctl_u.inbuf_size;
- ioctl_k.outbuf = NULL;
- ioctl_k.outbuf_size = ioctl_u.outbuf_size;
- ioctl_k.hba = hba;
- ioctl_k.bytes_returned = &bytes_returned;
-
- /* verify user buffer */
- if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
- ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
- (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
- ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
- (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
- ioctl_u.bytes_returned, sizeof(u32))) ||
- ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
-
- dprintk("scsi%d: got bad user address\n", hba->host->host_no);
- return -EINVAL;
- }
-
- /* map buffer to kernel. */
- if (ioctl_k.inbuf_size) {
- ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
- if (!ioctl_k.inbuf) {
- dprintk("scsi%d: fail to alloc inbuf\n",
- hba->host->host_no);
- err = -ENOMEM;
- goto err_exit;
- }
-
- if (copy_from_user(ioctl_k.inbuf,
- ioctl_u.inbuf, ioctl_k.inbuf_size)) {
- goto err_exit;
- }
- }
-
- if (ioctl_k.outbuf_size) {
- ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
- if (!ioctl_k.outbuf) {
- dprintk("scsi%d: fail to alloc outbuf\n",
- hba->host->host_no);
- err = -ENOMEM;
- goto err_exit;
- }
- }
-
- hptiop_do_ioctl(&ioctl_k);
-
- if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
- if (ioctl_k.outbuf_size &&
- copy_to_user(ioctl_u.outbuf,
- ioctl_k.outbuf, ioctl_k.outbuf_size))
- goto err_exit;
-
- if (ioctl_u.bytes_returned &&
- copy_to_user(ioctl_u.bytes_returned,
- &bytes_returned, sizeof(u32)))
- goto err_exit;
-
- err = 0;
- }
-
-err_exit:
- kfree(ioctl_k.inbuf);
- kfree(ioctl_k.outbuf);
-
- return err;
-}
-
-static int hptiop_cdev_open(struct inode *inode, struct file *file)
-{
- struct hptiop_hba *hba;
- unsigned i = 0, minor = iminor(inode);
- int ret = -ENODEV;
-
- spin_lock(&hptiop_hba_list_lock);
- list_for_each_entry(hba, &hptiop_hba_list, link) {
- if (i == minor) {
- file->private_data = hba;
- ret = 0;
- goto out;
- }
- i++;
- }
-
-out:
- spin_unlock(&hptiop_hba_list_lock);
- return ret;
-}
-
-static struct file_operations hptiop_cdev_fops = {
- .owner = THIS_MODULE,
- .read = hptiop_cdev_read,
- .ioctl = hptiop_cdev_ioctl,
- .open = hptiop_cdev_open,
-};
-
static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
struct Scsi_Host *host = class_to_shost(class_dev);
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto unmap_pci_bar;
}
- if (scsi_add_host(host, &pcidev->dev)) {
- printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
- hba->host->host_no);
- goto unmap_pci_bar;
- }
-
pci_set_drvdata(pcidev, host);
if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
driver_name, hba)) {
printk(KERN_ERR "scsi%d: request irq %d failed\n",
hba->host->host_no, pcidev->irq);
- goto remove_scsi_host;
+ goto unmap_pci_bar;
}
/* Allocate request mem */
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
if (hptiop_initialize_iop(hba))
goto free_request_mem;
- spin_lock(&hptiop_hba_list_lock);
- list_add_tail(&hba->link, &hptiop_hba_list);
- spin_unlock(&hptiop_hba_list_lock);
+ if (scsi_add_host(host, &pcidev->dev)) {
+ printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
+ hba->host->host_no);
+ goto free_request_mem;
+ }
+
scsi_scan_host(host);
@@ -1372,9 +844,6 @@ free_request_mem:
free_request_irq:
free_irq(hba->pcidev->irq, hba);
-remove_scsi_host:
- scsi_remove_host(host);
-
unmap_pci_bar:
iounmap(hba->iop);
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
scsi_remove_host(host);
- spin_lock(&hptiop_hba_list_lock);
- list_del_init(&hba->link);
- spin_unlock(&hptiop_hba_list_lock);
-
hptiop_shutdown(pcidev);
free_irq(hba->pcidev->irq, hba);
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = {
static int __init hptiop_module_init(void)
{
- int error;
-
printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
-
- error = pci_register_driver(&hptiop_pci_driver);
- if (error < 0)
- return error;
-
- hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
- if (hptiop_cdev_major < 0) {
- printk(KERN_WARNING "unable to register hptiop device.\n");
- return hptiop_cdev_major;
- }
-
- return 0;
+ return pci_register_driver(&hptiop_pci_driver);
}
static void __exit hptiop_module_exit(void)
{
- dprintk("hptiop_module_exit\n");
- unregister_chrdev(hptiop_cdev_major, "hptiop");
pci_unregister_driver(&hptiop_pci_driver);
}
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 7eed0b098171..6aeb5f003c3c 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -81,7 +81,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
int rc;
single_host_data = hostdata;
- rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, 0);
+ rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
if (rc < 0) {
printk("viopath_open failed with rc %d in open_event_path\n",
rc);
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 242b8873b333..ed22b96580c6 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -238,6 +238,7 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
if (rc == 2) {
/* Adapter is good, but other end is not ready */
printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
+ retrc = 0;
} else if (rc != 0) {
printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
goto reg_crq_failed;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f7b5d7372d26..94d1de55607f 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
/* No more interrupts */
if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
- local_irq_enable();
+ local_irq_enable_in_hardirq();
if (status.b.check)
rq->errors++;
idescsi_end_request (drive, 1, 0);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 848fb2aa4ca3..058f094f945a 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -43,13 +43,10 @@
#include "iscsi_tcp.h"
-#define ISCSI_TCP_VERSION "1.0-595"
-
MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
"Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ISCSI_TCP_VERSION);
/* #define DEBUG_TCP */
#define DEBUG_ASSERT
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
* must be called with session lock
*/
static void
-__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
struct scsi_cmnd *sc;
+ /* flush ctask's r2t queues */
+ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
sc = ctask->sc;
if (unlikely(!sc))
return;
@@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
spin_unlock(&session->lock);
return 0;
}
+
rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
BUG_ON(!rc);
@@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
tcp_ctask->exp_r2tsn = r2tsn + 1;
tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
- __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
+ list_move_tail(&ctask->running, &conn->xmitqueue);
scsi_queue_work(session->host, &conn->xmitwork);
conn->r2t_pdus_cnt++;
@@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
case ISCSI_OP_SCSI_DATA_IN:
tcp_conn->in.ctask = session->cmds[itt];
rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
+ if (rc)
+ return rc;
/* fall through */
case ISCSI_OP_SCSI_CMD_RSP:
tcp_conn->in.ctask = session->cmds[itt];
@@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
goto copy_hdr;
spin_lock(&session->lock);
- __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask);
+ iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
spin_unlock(&session->lock);
break;
@@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
- case ISCSI_OP_LOGOUT_RSP:
- case ISCSI_OP_NOOP_IN:
case ISCSI_OP_REJECT:
case ISCSI_OP_ASYNC_EVENT:
+ /*
+ * It is possible that we could get a PDU with a buffer larger
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it
+ */
+ if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
+ tcp_conn->in.datalen) {
+ printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
+ "but conn buffer is only %u (opcode %0x)\n",
+ tcp_conn->in.datalen,
+ DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
if (tcp_conn->in.datalen)
goto copy_hdr;
/* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
case ISCSI_OP_SCSI_TMFUNC_RSP:
rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
@@ -523,7 +546,7 @@ copy_hdr:
* skbs to complete the command then we have to copy the header
* for later use
*/
- if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <
+ if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
(tcp_conn->in.datalen + tcp_conn->in.padding +
(conn->datadgst_en ? 4 : 0))) {
debug_tcp("Copying header for later use. in.copy %d in.datalen"
@@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
* byte counters.
**/
static inline int
-iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
+iscsi_tcp_copy(struct iscsi_conn *conn)
{
- void *buf = tcp_conn->data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
int buf_size = tcp_conn->in.datalen;
int buf_left = buf_size - tcp_conn->data_copied;
int size = min(tcp_conn->in.copy, buf_left);
@@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
BUG_ON(size <= 0);
rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
- (char*)buf + tcp_conn->data_copied, size);
+ (char*)conn->data + tcp_conn->data_copied, size);
BUG_ON(rc);
tcp_conn->in.offset += size;
@@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
done:
/* check for non-exceptional status */
if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- debug_scsi("done [sc %lx res %d itt 0x%x]\n",
- (long)sc, sc->result, ctask->itt);
+ debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
+ (long)sc, sc->result, ctask->itt,
+ tcp_conn->in.hdr->flags);
spin_lock(&conn->session->lock);
- __iscsi_ctask_cleanup(conn, ctask);
+ iscsi_tcp_cleanup_ctask(conn, ctask);
__iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
spin_unlock(&conn->session->lock);
}
@@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn)
break;
case ISCSI_OP_SCSI_CMD_RSP:
spin_lock(&conn->session->lock);
- __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask);
+ iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
spin_unlock(&conn->session->lock);
case ISCSI_OP_TEXT_RSP:
case ISCSI_OP_LOGIN_RSP:
- case ISCSI_OP_NOOP_IN:
case ISCSI_OP_ASYNC_EVENT:
case ISCSI_OP_REJECT:
/*
* Collect data segment to the connection's data
* placeholder
*/
- if (iscsi_tcp_copy(tcp_conn)) {
+ if (iscsi_tcp_copy(conn)) {
rc = -EAGAIN;
goto exit;
}
- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data,
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
tcp_conn->in.datalen);
if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
- iscsi_recv_digest_update(tcp_conn, tcp_conn->data,
+ iscsi_recv_digest_update(tcp_conn, conn->data,
tcp_conn->in.datalen);
break;
default:
@@ -843,7 +866,7 @@ more:
if (rc == -EAGAIN)
goto nomore;
else {
- iscsi_conn_failure(conn, rc);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return 0;
}
}
@@ -897,7 +920,7 @@ more:
if (rc) {
if (rc == -EAGAIN)
goto again;
- iscsi_conn_failure(conn, rc);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return 0;
}
tcp_conn->in.copy -= tcp_conn->in.padding;
@@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn)
}
static void
-iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
+iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct sock *sk = tcp_conn->sock->sk;
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
ctask->imm_count -
ctask->unsol_count;
- debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
+ debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d "
"r2t_data %d]\n",
ctask->itt, ctask->total_length, ctask->imm_count,
ctask->unsol_count, tcp_ctask->r2t_data_count);
@@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
}
solicit_again:
/*
- * send Data-Out whitnin this R2T sequence.
+ * send Data-Out within this R2T sequence.
*/
if (!r2t->data_count)
goto data_out_done;
@@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_data_task *dtask = tcp_ctask->dtask;
- int sent, rc;
+ int sent = 0, rc;
tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
@@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
/* initial operational parameters */
tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
- tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
-
- /* allocate initial PDU receive place holder */
- if (tcp_conn->data_size <= PAGE_SIZE)
- tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL);
- else
- tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL,
- get_order(tcp_conn->data_size));
- if (!tcp_conn->data)
- goto max_recv_dlenght_alloc_fail;
return cls_conn;
-max_recv_dlenght_alloc_fail:
- kfree(tcp_conn);
tcp_conn_alloc_fail:
iscsi_conn_teardown(cls_conn);
return NULL;
}
static void
+iscsi_tcp_release_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ if (!tcp_conn->sock)
+ return;
+
+ sock_hold(tcp_conn->sock->sk);
+ iscsi_conn_restore_callbacks(tcp_conn);
+ sock_put(tcp_conn->sock->sk);
+
+ sock_release(tcp_conn->sock);
+ tcp_conn->sock = NULL;
+ conn->recv_lock = NULL;
+}
+
+static void
iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
@@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
if (conn->hdrdgst_en || conn->datadgst_en)
digest = 1;
+ iscsi_tcp_release_conn(conn);
iscsi_conn_teardown(cls_conn);
/* now free tcp_conn */
@@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
crypto_free_tfm(tcp_conn->data_rx_tfm);
}
- /* free conn->data, size = MaxRecvDataSegmentLength */
- if (tcp_conn->data_size <= PAGE_SIZE)
- kfree(tcp_conn->data);
- else
- free_pages((unsigned long)tcp_conn->data,
- get_order(tcp_conn->data_size));
kfree(tcp_conn);
}
+static void
+iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+}
+
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
return 0;
}
-static void
-iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct iscsi_r2t_info *r2t;
-
- /* flush ctask's r2t queues */
- while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)))
- __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
-
- __iscsi_ctask_cleanup(conn, ctask);
-}
-
-static void
-iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn)
-{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct sock *sk;
-
- if (!tcp_conn->sock)
- return;
-
- sk = tcp_conn->sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void
-iscsi_tcp_terminate_conn(struct iscsi_conn *conn)
-{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-
- if (!tcp_conn->sock)
- return;
-
- sock_hold(tcp_conn->sock->sk);
- iscsi_conn_restore_callbacks(conn);
- sock_put(tcp_conn->sock->sk);
-
- sock_release(tcp_conn->sock);
- tcp_conn->sock = NULL;
- conn->recv_lock = NULL;
-}
-
/* called with host lock */
static void
iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
@@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
sizeof(struct iscsi_hdr));
tcp_mtask->xmstate = XMSTATE_IMM_HDR;
+ tcp_mtask->sent = 0;
if (mtask->data_count)
iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
@@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
int value;
switch(param) {
- case ISCSI_PARAM_MAX_RECV_DLENGTH: {
- char *saveptr = tcp_conn->data;
- gfp_t flags = GFP_KERNEL;
-
- sscanf(buf, "%d", &value);
- if (tcp_conn->data_size >= value) {
- iscsi_set_param(cls_conn, param, buf, buflen);
- break;
- }
-
- spin_lock_bh(&session->lock);
- if (conn->stop_stage == STOP_CONN_RECOVER)
- flags = GFP_ATOMIC;
- spin_unlock_bh(&session->lock);
-
- if (value <= PAGE_SIZE)
- tcp_conn->data = kmalloc(value, flags);
- else
- tcp_conn->data = (void*)__get_free_pages(flags,
- get_order(value));
- if (tcp_conn->data == NULL) {
- tcp_conn->data = saveptr;
- return -ENOMEM;
- }
- if (tcp_conn->data_size <= PAGE_SIZE)
- kfree(saveptr);
- else
- free_pages((unsigned long)saveptr,
- get_order(tcp_conn->data_size));
- iscsi_set_param(cls_conn, param, buf, buflen);
- tcp_conn->data_size = value;
- break;
- }
case ISCSI_PARAM_HDRDGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
@@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
}
static struct scsi_host_template iscsi_sht = {
- .name = "iSCSI Initiator over TCP/IP, v"
- ISCSI_TCP_VERSION,
+ .name = "iSCSI Initiator over TCP/IP",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = ISCSI_XMIT_CMDS_MAX - 1,
@@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = {
.get_conn_param = iscsi_tcp_conn_get_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_conn_start,
- .stop_conn = iscsi_conn_stop,
- /* these are called as part of conn recovery */
- .suspend_conn_recv = iscsi_tcp_suspend_conn_rx,
- .terminate_conn = iscsi_tcp_terminate_conn,
+ .stop_conn = iscsi_tcp_conn_stop,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_conn_get_stats,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 808302832e68..6a4ee704e46e 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -78,8 +78,6 @@ struct iscsi_tcp_conn {
char hdrext[4*sizeof(__u16) +
sizeof(__u32)];
int data_copied;
- char *data; /* data placeholder */
- int data_size; /* actual recv_dlength */
int stop_stage; /* conn_stop() flag: *
* stop to recover, *
* stop to terminate */
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 3fd8a96f2af3..bfac4441d89f 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -257,7 +257,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, struct scsi_cmnd *sp)
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, struct scsi_cmnd *sp)
{
int sz = sp->use_sg - 1;
- struct scatterlist *sg = (struct scatterlist *)sp->buffer;
+ struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
while(sz >= 0) {
vdma_free(sg[sz].dma_address);
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 1c960ac1617f..73dd6c8deede 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -61,9 +61,9 @@
#include "libata.h"
/* debounce timing parameters in msecs { interval, duration, timeout } */
-const unsigned long sata_deb_timing_boot[] = { 5, 100, 2000 };
-const unsigned long sata_deb_timing_eh[] = { 25, 500, 2000 };
-const unsigned long sata_deb_timing_before_fsrst[] = { 100, 2000, 5000 };
+const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
+const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
+const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
@@ -907,7 +907,7 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
{
int rc;
- if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
+ if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
return;
PREPARE_WORK(&ap->port_task, fn, data);
@@ -938,7 +938,7 @@ void ata_port_flush_task(struct ata_port *ap)
DPRINTK("ENTER\n");
spin_lock_irqsave(ap->lock, flags);
- ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
+ ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
spin_unlock_irqrestore(ap->lock, flags);
DPRINTK("flush #1\n");
@@ -957,7 +957,7 @@ void ata_port_flush_task(struct ata_port *ap)
}
spin_lock_irqsave(ap->lock, flags);
- ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
+ ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
spin_unlock_irqrestore(ap->lock, flags);
if (ata_msg_ctl(ap))
@@ -1009,7 +1009,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
spin_lock_irqsave(ap->lock, flags);
/* no internal command while frozen */
- if (ap->flags & ATA_FLAG_FROZEN) {
+ if (ap->pflags & ATA_PFLAG_FROZEN) {
spin_unlock_irqrestore(ap->lock, flags);
return AC_ERR_SYSTEM;
}
@@ -1325,6 +1325,19 @@ static void ata_dev_config_ncq(struct ata_device *dev,
snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
+static void ata_set_port_max_cmd_len(struct ata_port *ap)
+{
+ int i;
+
+ if (ap->host) {
+ ap->host->max_cmd_len = 0;
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ap->host->max_cmd_len = max_t(unsigned int,
+ ap->host->max_cmd_len,
+ ap->device[i].cdb_len);
+ }
+}
+
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -1344,7 +1357,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
struct ata_port *ap = dev->ap;
const u16 *id = dev->id;
unsigned int xfer_mask;
- int i, rc;
+ int rc;
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
ata_dev_printk(dev, KERN_INFO,
@@ -1404,7 +1417,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
/* print device info to dmesg */
- if (ata_msg_info(ap))
+ if (ata_msg_drv(ap) && print_info)
ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
"max %s, %Lu sectors: %s %s\n",
ata_id_major_version(id),
@@ -1427,7 +1440,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
}
/* print device info to dmesg */
- if (ata_msg_info(ap))
+ if (ata_msg_drv(ap) && print_info)
ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
"max %s, %Lu sectors: CHS %u/%u/%u\n",
ata_id_major_version(id),
@@ -1439,7 +1452,7 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
if (dev->id[59] & 0x100) {
dev->multi_count = dev->id[59] & 0xff;
- if (ata_msg_info(ap))
+ if (ata_msg_drv(ap) && print_info)
ata_dev_printk(dev, KERN_INFO,
"ata%u: dev %u multi count %u\n",
ap->id, dev->devno, dev->multi_count);
@@ -1468,21 +1481,17 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
}
/* print device info to dmesg */
- if (ata_msg_info(ap))
+ if (ata_msg_drv(ap) && print_info)
ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
ata_mode_string(xfer_mask),
cdb_intr_string);
}
- ap->host->max_cmd_len = 0;
- for (i = 0; i < ATA_MAX_DEVICES; i++)
- ap->host->max_cmd_len = max_t(unsigned int,
- ap->host->max_cmd_len,
- ap->device[i].cdb_len);
+ ata_set_port_max_cmd_len(ap);
/* limit bridge transfers to udma5, 200 sectors */
if (ata_dev_knobble(dev)) {
- if (ata_msg_info(ap))
+ if (ata_msg_drv(ap) && print_info)
ata_dev_printk(dev, KERN_INFO,
"applying bridge limits\n");
dev->udma_mask &= ATA_UDMA5;
@@ -2137,7 +2146,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
* return error code and failing device on failure.
*/
for (i = 0; i < ATA_MAX_DEVICES; i++) {
- if (ata_dev_enabled(&ap->device[i])) {
+ if (ata_dev_ready(&ap->device[i])) {
ap->ops->set_mode(ap);
break;
}
@@ -2203,7 +2212,8 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
for (i = 0; i < ATA_MAX_DEVICES; i++) {
dev = &ap->device[i];
- if (!ata_dev_enabled(dev))
+ /* don't update suspended devices' xfer mode */
+ if (!ata_dev_ready(dev))
continue;
rc = ata_dev_set_mode(dev);
@@ -2579,7 +2589,7 @@ static void ata_wait_spinup(struct ata_port *ap)
/* first, debounce phy if SATA */
if (ap->cbl == ATA_CBL_SATA) {
- rc = sata_phy_debounce(ap, sata_deb_timing_eh);
+ rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
/* if debounced successfully and offline, no need to wait */
if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
@@ -2615,16 +2625,17 @@ static void ata_wait_spinup(struct ata_port *ap)
int ata_std_prereset(struct ata_port *ap)
{
struct ata_eh_context *ehc = &ap->eh_context;
- const unsigned long *timing;
+ const unsigned long *timing = sata_ehc_deb_timing(ehc);
int rc;
- /* hotplug? */
- if (ehc->i.flags & ATA_EHI_HOTPLUGGED) {
- if (ap->flags & ATA_FLAG_HRST_TO_RESUME)
- ehc->i.action |= ATA_EH_HARDRESET;
- if (ap->flags & ATA_FLAG_SKIP_D2H_BSY)
- ata_wait_spinup(ap);
- }
+ /* handle link resume & hotplug spinup */
+ if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
+ (ap->flags & ATA_FLAG_HRST_TO_RESUME))
+ ehc->i.action |= ATA_EH_HARDRESET;
+
+ if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
+ (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
+ ata_wait_spinup(ap);
/* if we're about to do hardreset, nothing more to do */
if (ehc->i.action & ATA_EH_HARDRESET)
@@ -2632,11 +2643,6 @@ int ata_std_prereset(struct ata_port *ap)
/* if SATA, resume phy */
if (ap->cbl == ATA_CBL_SATA) {
- if (ap->flags & ATA_FLAG_LOADING)
- timing = sata_deb_timing_boot;
- else
- timing = sata_deb_timing_eh;
-
rc = sata_phy_resume(ap, timing);
if (rc && rc != -EOPNOTSUPP) {
/* phy resume failed */
@@ -2724,6 +2730,8 @@ int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
*/
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
+ struct ata_eh_context *ehc = &ap->eh_context;
+ const unsigned long *timing = sata_ehc_deb_timing(ehc);
u32 scontrol;
int rc;
@@ -2738,7 +2746,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
return rc;
- scontrol = (scontrol & 0x0f0) | 0x302;
+ scontrol = (scontrol & 0x0f0) | 0x304;
if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
return rc;
@@ -2761,7 +2769,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
msleep(1);
/* bring phy back */
- sata_phy_resume(ap, sata_deb_timing_eh);
+ sata_phy_resume(ap, timing);
/* TODO: phy layer with polling, timeouts, etc. */
if (ata_port_offline(ap)) {
@@ -4285,7 +4293,7 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
unsigned int i;
/* no command while frozen */
- if (unlikely(ap->flags & ATA_FLAG_FROZEN))
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
/* the last tag is reserved for internal command. */
@@ -4407,7 +4415,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* taken care of.
*/
if (ap->ops->error_handler) {
- WARN_ON(ap->flags & ATA_FLAG_FROZEN);
+ WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
if (unlikely(qc->err_mask))
qc->flags |= ATA_QCFLAG_FAILED;
@@ -5001,86 +5009,120 @@ int ata_flush_cache(struct ata_device *dev)
return 0;
}
-static int ata_standby_drive(struct ata_device *dev)
+static int ata_host_set_request_pm(struct ata_host_set *host_set,
+ pm_message_t mesg, unsigned int action,
+ unsigned int ehi_flags, int wait)
{
- unsigned int err_mask;
+ unsigned long flags;
+ int i, rc;
- err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
- if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
- "(err_mask=0x%x)\n", err_mask);
- return -EIO;
- }
+ for (i = 0; i < host_set->n_ports; i++) {
+ struct ata_port *ap = host_set->ports[i];
- return 0;
-}
+ /* Previous resume operation might still be in
+ * progress. Wait for PM_PENDING to clear.
+ */
+ if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ }
-static int ata_start_drive(struct ata_device *dev)
-{
- unsigned int err_mask;
+ /* request PM ops to EH */
+ spin_lock_irqsave(ap->lock, flags);
- err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
- if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to start drive "
- "(err_mask=0x%x)\n", err_mask);
- return -EIO;
+ ap->pm_mesg = mesg;
+ if (wait) {
+ rc = 0;
+ ap->pm_result = &rc;
+ }
+
+ ap->pflags |= ATA_PFLAG_PM_PENDING;
+ ap->eh_info.action |= action;
+ ap->eh_info.flags |= ehi_flags;
+
+ ata_port_schedule_eh(ap);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* wait and check result */
+ if (wait) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ if (rc)
+ return rc;
+ }
}
return 0;
}
/**
- * ata_device_resume - wakeup a previously suspended devices
- * @dev: the device to resume
+ * ata_host_set_suspend - suspend host_set
+ * @host_set: host_set to suspend
+ * @mesg: PM message
*
- * Kick the drive back into action, by sending it an idle immediate
- * command and making sure its transfer mode matches between drive
- * and host.
+ * Suspend @host_set. Actual operation is performed by EH. This
+ * function requests EH to perform PM operations and waits for EH
+ * to finish.
*
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
*/
-int ata_device_resume(struct ata_device *dev)
+int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
{
- struct ata_port *ap = dev->ap;
+ int i, j, rc;
+
+ rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
+ if (rc)
+ goto fail;
- if (ap->flags & ATA_FLAG_SUSPENDED) {
- struct ata_device *failed_dev;
+ /* EH is quiescent now. Fail if we have any ready device.
+ * This happens if hotplug occurs between completion of device
+ * suspension and here.
+ */
+ for (i = 0; i < host_set->n_ports; i++) {
+ struct ata_port *ap = host_set->ports[i];
- ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
- ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 200000);
+ for (j = 0; j < ATA_MAX_DEVICES; j++) {
+ struct ata_device *dev = &ap->device[j];
- ap->flags &= ~ATA_FLAG_SUSPENDED;
- while (ata_set_mode(ap, &failed_dev))
- ata_dev_disable(failed_dev);
+ if (ata_dev_ready(dev)) {
+ ata_port_printk(ap, KERN_WARNING,
+ "suspend failed, device %d "
+ "still active\n", dev->devno);
+ rc = -EBUSY;
+ goto fail;
+ }
+ }
}
- if (!ata_dev_enabled(dev))
- return 0;
- if (dev->class == ATA_DEV_ATA)
- ata_start_drive(dev);
+ host_set->dev->power.power_state = mesg;
return 0;
+
+ fail:
+ ata_host_set_resume(host_set);
+ return rc;
}
/**
- * ata_device_suspend - prepare a device for suspend
- * @dev: the device to suspend
- * @state: target power management state
+ * ata_host_set_resume - resume host_set
+ * @host_set: host_set to resume
*
- * Flush the cache on the drive, if appropriate, then issue a
- * standbynow command.
+ * Resume @host_set. Actual operation is performed by EH. This
+ * function requests EH to perform PM operations and returns.
+ * Note that all resume operations are performed in parallel.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
*/
-int ata_device_suspend(struct ata_device *dev, pm_message_t state)
+void ata_host_set_resume(struct ata_host_set *host_set)
{
- struct ata_port *ap = dev->ap;
-
- if (!ata_dev_enabled(dev))
- return 0;
- if (dev->class == ATA_DEV_ATA)
- ata_flush_cache(dev);
-
- if (state.event != PM_EVENT_FREEZE)
- ata_standby_drive(dev);
- ap->flags |= ATA_FLAG_SUSPENDED;
- return 0;
+ ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
+ host_set->dev->power.power_state = PMSG_ON;
}
/**
@@ -5143,28 +5185,6 @@ void ata_host_stop (struct ata_host_set *host_set)
iounmap(host_set->mmio_base);
}
-
-/**
- * ata_host_remove - Unregister SCSI host structure with upper layers
- * @ap: Port to unregister
- * @do_unregister: 1 if we fully unregister, 0 to just stop the port
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
-{
- struct Scsi_Host *sh = ap->host;
-
- DPRINTK("ENTER\n");
-
- if (do_unregister)
- scsi_remove_host(sh);
-
- ap->ops->port_stop(ap);
-}
-
/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
@@ -5440,6 +5460,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
}
if (ap->ops->error_handler) {
+ struct ata_eh_info *ehi = &ap->eh_info;
unsigned long flags;
ata_port_probe(ap);
@@ -5447,10 +5468,11 @@ int ata_device_add(const struct ata_probe_ent *ent)
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
- ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
- ap->eh_info.action |= ATA_EH_SOFTRESET;
+ ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
+ ehi->action |= ATA_EH_SOFTRESET;
+ ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
- ap->flags |= ATA_FLAG_LOADING;
+ ap->pflags |= ATA_PFLAG_LOADING;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
@@ -5488,8 +5510,11 @@ int ata_device_add(const struct ata_probe_ent *ent)
err_out:
for (i = 0; i < count; i++) {
- ata_host_remove(host_set->ports[i], 1);
- scsi_host_put(host_set->ports[i]->host);
+ struct ata_port *ap = host_set->ports[i];
+ if (ap) {
+ ap->ops->port_stop(ap);
+ scsi_host_put(ap->host);
+ }
}
err_free_ret:
kfree(host_set);
@@ -5514,11 +5539,11 @@ void ata_port_detach(struct ata_port *ap)
int i;
if (!ap->ops->error_handler)
- return;
+ goto skip_eh;
/* tell EH we're leaving & flush EH */
spin_lock_irqsave(ap->lock, flags);
- ap->flags |= ATA_FLAG_UNLOADING;
+ ap->pflags |= ATA_PFLAG_UNLOADING;
spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
@@ -5550,6 +5575,7 @@ void ata_port_detach(struct ata_port *ap)
cancel_delayed_work(&ap->hotplug_task);
flush_workqueue(ata_aux_wq);
+ skip_eh:
/* remove the associated SCSI host */
scsi_remove_host(ap->host);
}
@@ -5618,7 +5644,7 @@ int ata_scsi_release(struct Scsi_Host *host)
DPRINTK("ENTER\n");
ap->ops->port_disable(ap);
- ata_host_remove(ap, 0);
+ ap->ops->port_stop(ap);
DPRINTK("EXIT\n");
return 1;
@@ -5723,20 +5749,55 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
return (tmp == bits->val) ? 1 : 0;
}
-int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
+void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- return 0;
+
+ if (state.event == PM_EVENT_SUSPEND) {
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
}
-int ata_pci_device_resume(struct pci_dev *pdev)
+void ata_pci_device_do_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_device(pdev);
pci_set_master(pdev);
+}
+
+int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+ int rc = 0;
+
+ rc = ata_host_set_suspend(host_set, state);
+ if (rc)
+ return rc;
+
+ if (host_set->next) {
+ rc = ata_host_set_suspend(host_set->next, state);
+ if (rc) {
+ ata_host_set_resume(host_set);
+ return rc;
+ }
+ }
+
+ ata_pci_device_do_suspend(pdev, state);
+
+ return 0;
+}
+
+int ata_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+
+ ata_pci_device_do_resume(pdev);
+ ata_host_set_resume(host_set);
+ if (host_set->next)
+ ata_host_set_resume(host_set->next);
+
return 0;
}
#endif /* CONFIG_PCI */
@@ -5842,9 +5903,9 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
* Do not depend on ABI/API stability.
*/
-EXPORT_SYMBOL_GPL(sata_deb_timing_boot);
-EXPORT_SYMBOL_GPL(sata_deb_timing_eh);
-EXPORT_SYMBOL_GPL(sata_deb_timing_before_fsrst);
+EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
+EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
+EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
@@ -5916,6 +5977,8 @@ EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
+EXPORT_SYMBOL_GPL(ata_host_set_suspend);
+EXPORT_SYMBOL_GPL(ata_host_set_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
@@ -5930,14 +5993,14 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
-EXPORT_SYMBOL_GPL(ata_device_suspend);
-EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index bf5a72aca8a4..29f59345305d 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -47,6 +47,8 @@
static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
+static void ata_eh_handle_port_suspend(struct ata_port *ap);
+static void ata_eh_handle_port_resume(struct ata_port *ap);
static void ata_ering_record(struct ata_ering *ering, int is_io,
unsigned int err_mask)
@@ -190,7 +192,6 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
void ata_scsi_error(struct Scsi_Host *host)
{
struct ata_port *ap = ata_shost_to_port(host);
- spinlock_t *ap_lock = ap->lock;
int i, repeat_cnt = ATA_EH_MAX_REPEAT;
unsigned long flags;
@@ -217,7 +218,7 @@ void ata_scsi_error(struct Scsi_Host *host)
struct scsi_cmnd *scmd, *tmp;
int nr_timedout = 0;
- spin_lock_irqsave(ap_lock, flags);
+ spin_lock_irqsave(ap->lock, flags);
list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
struct ata_queued_cmd *qc;
@@ -256,43 +257,49 @@ void ata_scsi_error(struct Scsi_Host *host)
if (nr_timedout)
__ata_port_freeze(ap);
- spin_unlock_irqrestore(ap_lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
} else
- spin_unlock_wait(ap_lock);
+ spin_unlock_wait(ap->lock);
repeat:
/* invoke error handler */
if (ap->ops->error_handler) {
+ /* process port resume request */
+ ata_eh_handle_port_resume(ap);
+
/* fetch & clear EH info */
- spin_lock_irqsave(ap_lock, flags);
+ spin_lock_irqsave(ap->lock, flags);
memset(&ap->eh_context, 0, sizeof(ap->eh_context));
ap->eh_context.i = ap->eh_info;
memset(&ap->eh_info, 0, sizeof(ap->eh_info));
- ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
- ap->flags &= ~ATA_FLAG_EH_PENDING;
+ ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
- spin_unlock_irqrestore(ap_lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
- /* invoke EH. if unloading, just finish failed qcs */
- if (!(ap->flags & ATA_FLAG_UNLOADING))
+ /* invoke EH, skip if unloading or suspended */
+ if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
ap->ops->error_handler(ap);
else
ata_eh_finish(ap);
+ /* process port suspend request */
+ ata_eh_handle_port_suspend(ap);
+
/* Exception might have happend after ->error_handler
* recovered the port but before this point. Repeat
* EH in such case.
*/
- spin_lock_irqsave(ap_lock, flags);
+ spin_lock_irqsave(ap->lock, flags);
- if (ap->flags & ATA_FLAG_EH_PENDING) {
+ if (ap->pflags & ATA_PFLAG_EH_PENDING) {
if (--repeat_cnt) {
ata_port_printk(ap, KERN_INFO,
"EH pending after completion, "
"repeating EH (cnt=%d)\n", repeat_cnt);
- spin_unlock_irqrestore(ap_lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
goto repeat;
}
ata_port_printk(ap, KERN_ERR, "EH pending after %d "
@@ -302,14 +309,14 @@ void ata_scsi_error(struct Scsi_Host *host)
/* this run is complete, make sure EH info is clear */
memset(&ap->eh_info, 0, sizeof(ap->eh_info));
- /* Clear host_eh_scheduled while holding ap_lock such
+ /* Clear host_eh_scheduled while holding ap->lock such
* that if exception occurs after this point but
* before EH completion, SCSI midlayer will
* re-initiate EH.
*/
host->host_eh_scheduled = 0;
- spin_unlock_irqrestore(ap_lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
} else {
WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
ap->ops->eng_timeout(ap);
@@ -321,24 +328,23 @@ void ata_scsi_error(struct Scsi_Host *host)
scsi_eh_flush_done_q(&ap->eh_done_q);
/* clean up */
- spin_lock_irqsave(ap_lock, flags);
+ spin_lock_irqsave(ap->lock, flags);
- if (ap->flags & ATA_FLAG_LOADING) {
- ap->flags &= ~ATA_FLAG_LOADING;
- } else {
- if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
- queue_work(ata_aux_wq, &ap->hotplug_task);
- if (ap->flags & ATA_FLAG_RECOVERED)
- ata_port_printk(ap, KERN_INFO, "EH complete\n");
- }
+ if (ap->pflags & ATA_PFLAG_LOADING)
+ ap->pflags &= ~ATA_PFLAG_LOADING;
+ else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
+ queue_work(ata_aux_wq, &ap->hotplug_task);
- ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);
+ if (ap->pflags & ATA_PFLAG_RECOVERED)
+ ata_port_printk(ap, KERN_INFO, "EH complete\n");
+
+ ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
/* tell wait_eh that we're done */
- ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
+ ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
wake_up_all(&ap->eh_wait_q);
- spin_unlock_irqrestore(ap_lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
DPRINTK("EXIT\n");
}
@@ -360,7 +366,7 @@ void ata_port_wait_eh(struct ata_port *ap)
retry:
spin_lock_irqsave(ap->lock, flags);
- while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
+ while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(ap->lock, flags);
schedule();
@@ -489,7 +495,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
WARN_ON(!ap->ops->error_handler);
qc->flags |= ATA_QCFLAG_FAILED;
- qc->ap->flags |= ATA_FLAG_EH_PENDING;
+ qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
@@ -513,7 +519,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
{
WARN_ON(!ap->ops->error_handler);
- ap->flags |= ATA_FLAG_EH_PENDING;
+ ap->pflags |= ATA_PFLAG_EH_PENDING;
scsi_schedule_eh(ap->host);
DPRINTK("port EH scheduled\n");
@@ -578,7 +584,7 @@ static void __ata_port_freeze(struct ata_port *ap)
if (ap->ops->freeze)
ap->ops->freeze(ap);
- ap->flags |= ATA_FLAG_FROZEN;
+ ap->pflags |= ATA_PFLAG_FROZEN;
DPRINTK("ata%u port frozen\n", ap->id);
}
@@ -646,7 +652,7 @@ void ata_eh_thaw_port(struct ata_port *ap)
spin_lock_irqsave(ap->lock, flags);
- ap->flags &= ~ATA_FLAG_FROZEN;
+ ap->pflags &= ~ATA_PFLAG_FROZEN;
if (ap->ops->thaw)
ap->ops->thaw(ap);
@@ -731,7 +737,7 @@ static void ata_eh_detach_dev(struct ata_device *dev)
if (ata_scsi_offline_dev(dev)) {
dev->flags |= ATA_DFLAG_DETACHED;
- ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
+ ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
}
/* clear per-dev EH actions */
@@ -758,10 +764,29 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
unsigned int action)
{
unsigned long flags;
+ struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_context *ehc = &ap->eh_context;
spin_lock_irqsave(ap->lock, flags);
- ata_eh_clear_action(dev, &ap->eh_info, action);
- ap->flags |= ATA_FLAG_RECOVERED;
+
+ /* Reset is represented by combination of actions and EHI
+ * flags. Suck in all related bits before clearing eh_info to
+ * avoid losing requested action.
+ */
+ if (action & ATA_EH_RESET_MASK) {
+ ehc->i.action |= ehi->action & ATA_EH_RESET_MASK;
+ ehc->i.flags |= ehi->flags & ATA_EHI_RESET_MODIFIER_MASK;
+
+ /* make sure all reset actions are cleared & clear EHI flags */
+ action |= ATA_EH_RESET_MASK;
+ ehi->flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
+ }
+
+ ata_eh_clear_action(dev, ehi, action);
+
+ if (!(ehc->i.flags & ATA_EHI_QUIET))
+ ap->pflags |= ATA_PFLAG_RECOVERED;
+
spin_unlock_irqrestore(ap->lock, flags);
}
@@ -780,6 +805,12 @@ static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
unsigned int action)
{
+ /* if reset is complete, clear all reset actions & reset modifier */
+ if (action & ATA_EH_RESET_MASK) {
+ action |= ATA_EH_RESET_MASK;
+ ap->eh_context.i.flags &= ~ATA_EHI_RESET_MODIFIER_MASK;
+ }
+
ata_eh_clear_action(dev, &ap->eh_context.i, action);
}
@@ -1027,7 +1058,7 @@ static void ata_eh_analyze_ncq_error(struct ata_port *ap)
int tag, rc;
/* if frozen, we can't do much */
- if (ap->flags & ATA_FLAG_FROZEN)
+ if (ap->pflags & ATA_PFLAG_FROZEN)
return;
/* is it NCQ device error? */
@@ -1266,8 +1297,6 @@ static int ata_eh_speed_down(struct ata_device *dev, int is_io,
static void ata_eh_autopsy(struct ata_port *ap)
{
struct ata_eh_context *ehc = &ap->eh_context;
- unsigned int action = ehc->i.action;
- struct ata_device *failed_dev = NULL;
unsigned int all_err_mask = 0;
int tag, is_io = 0;
u32 serror;
@@ -1275,13 +1304,16 @@ static void ata_eh_autopsy(struct ata_port *ap)
DPRINTK("ENTER\n");
+ if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
+ return;
+
/* obtain and analyze SError */
rc = sata_scr_read(ap, SCR_ERROR, &serror);
if (rc == 0) {
ehc->i.serror |= serror;
ata_eh_analyze_serror(ap);
} else if (rc != -EOPNOTSUPP)
- action |= ATA_EH_HARDRESET;
+ ehc->i.action |= ATA_EH_HARDRESET;
/* analyze NCQ failure */
ata_eh_analyze_ncq_error(ap);
@@ -1302,7 +1334,7 @@ static void ata_eh_autopsy(struct ata_port *ap)
qc->err_mask |= ehc->i.err_mask;
/* analyze TF */
- action |= ata_eh_analyze_tf(qc, &qc->result_tf);
+ ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
/* DEV errors are probably spurious in case of ATA_BUS error */
if (qc->err_mask & AC_ERR_ATA_BUS)
@@ -1316,38 +1348,35 @@ static void ata_eh_autopsy(struct ata_port *ap)
/* SENSE_VALID trumps dev/unknown error and revalidation */
if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
- action &= ~ATA_EH_REVALIDATE;
+ ehc->i.action &= ~ATA_EH_REVALIDATE;
}
/* accumulate error info */
- failed_dev = qc->dev;
+ ehc->i.dev = qc->dev;
all_err_mask |= qc->err_mask;
if (qc->flags & ATA_QCFLAG_IO)
is_io = 1;
}
/* enforce default EH actions */
- if (ap->flags & ATA_FLAG_FROZEN ||
+ if (ap->pflags & ATA_PFLAG_FROZEN ||
all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
- action |= ATA_EH_SOFTRESET;
+ ehc->i.action |= ATA_EH_SOFTRESET;
else if (all_err_mask)
- action |= ATA_EH_REVALIDATE;
+ ehc->i.action |= ATA_EH_REVALIDATE;
/* if we have offending qcs and the associated failed device */
- if (failed_dev) {
+ if (ehc->i.dev) {
/* speed down */
- action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
+ ehc->i.action |= ata_eh_speed_down(ehc->i.dev, is_io,
+ all_err_mask);
/* perform per-dev EH action only on the offending device */
- ehc->i.dev_action[failed_dev->devno] |=
- action & ATA_EH_PERDEV_MASK;
- action &= ~ATA_EH_PERDEV_MASK;
+ ehc->i.dev_action[ehc->i.dev->devno] |=
+ ehc->i.action & ATA_EH_PERDEV_MASK;
+ ehc->i.action &= ~ATA_EH_PERDEV_MASK;
}
- /* record autopsy result */
- ehc->i.dev = failed_dev;
- ehc->i.action = action;
-
DPRINTK("EXIT\n");
}
@@ -1385,7 +1414,7 @@ static void ata_eh_report(struct ata_port *ap)
return;
frozen = "";
- if (ap->flags & ATA_FLAG_FROZEN)
+ if (ap->pflags & ATA_PFLAG_FROZEN)
frozen = " frozen";
if (ehc->i.dev) {
@@ -1465,11 +1494,14 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
struct ata_eh_context *ehc = &ap->eh_context;
unsigned int *classes = ehc->classes;
int tries = ATA_EH_RESET_TRIES;
- int verbose = !(ap->flags & ATA_FLAG_LOADING);
+ int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
unsigned int action;
ata_reset_fn_t reset;
int i, did_followup_srst, rc;
+ /* about to reset */
+ ata_eh_about_to_do(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
+
/* Determine which reset to use and record in ehc->i.action.
* prereset() may examine and modify it.
*/
@@ -1518,8 +1550,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
reset == softreset ? "soft" : "hard");
- /* reset */
- ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
+ /* mark that this EH session started with reset */
ehc->i.flags |= ATA_EHI_DID_RESET;
rc = ata_do_reset(ap, reset, classes);
@@ -1582,7 +1613,7 @@ static int ata_eh_reset(struct ata_port *ap, int classify,
postreset(ap, classes);
/* reset successful, schedule revalidation */
- ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
+ ata_eh_done(ap, NULL, ehc->i.action & ATA_EH_RESET_MASK);
ehc->i.action |= ATA_EH_REVALIDATE;
}
@@ -1605,7 +1636,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
dev = &ap->device[i];
action = ata_eh_dev_action(dev);
- if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
+ if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
if (ata_port_offline(ap)) {
rc = -EIO;
break;
@@ -1636,7 +1667,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
}
spin_lock_irqsave(ap->lock, flags);
- ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
+ ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
spin_unlock_irqrestore(ap->lock, flags);
}
}
@@ -1648,6 +1679,164 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
return rc;
}
+/**
+ * ata_eh_suspend - handle suspend EH action
+ * @ap: target host port
+ * @r_failed_dev: result parameter to indicate failing device
+ *
+ * Handle suspend EH action. Disk devices are spun down and
+ * other types of devices are just marked suspended. Once
+ * suspended, no EH action to the device is allowed until it is
+ * resumed.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise
+ */
+static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+ struct ata_device *dev;
+ int i, rc = 0;
+
+ DPRINTK("ENTER\n");
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ unsigned long flags;
+ unsigned int action, err_mask;
+
+ dev = &ap->device[i];
+ action = ata_eh_dev_action(dev);
+
+ if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
+ continue;
+
+ WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
+
+ ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
+
+ if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
+ /* flush cache */
+ rc = ata_flush_cache(dev);
+ if (rc)
+ break;
+
+ /* spin down */
+ err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to "
+ "spin down (err_mask=0x%x)\n",
+ err_mask);
+ rc = -EIO;
+ break;
+ }
+ }
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev->flags |= ATA_DFLAG_SUSPENDED;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ ata_eh_done(ap, dev, ATA_EH_SUSPEND);
+ }
+
+ if (rc)
+ *r_failed_dev = dev;
+
+ DPRINTK("EXIT\n");
+ return 0;
+}
+
+/**
+ * ata_eh_prep_resume - prep for resume EH action
+ * @ap: target host port
+ *
+ * Clear SUSPENDED in preparation for scheduled resume actions.
+ * This allows other parts of EH to access the devices being
+ * resumed.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_eh_prep_resume(struct ata_port *ap)
+{
+ struct ata_device *dev;
+ unsigned long flags;
+ int i;
+
+ DPRINTK("ENTER\n");
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ unsigned int action;
+
+ dev = &ap->device[i];
+ action = ata_eh_dev_action(dev);
+
+ if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
+ continue;
+
+ spin_lock_irqsave(ap->lock, flags);
+ dev->flags &= ~ATA_DFLAG_SUSPENDED;
+ spin_unlock_irqrestore(ap->lock, flags);
+ }
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_eh_resume - handle resume EH action
+ * @ap: target host port
+ * @r_failed_dev: result parameter to indicate failing device
+ *
+ * Handle resume EH action. Target devices are already reset and
+ * revalidated. Spinning up is the only operation left.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise
+ */
+static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
+{
+ struct ata_device *dev;
+ int i, rc = 0;
+
+ DPRINTK("ENTER\n");
+
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ unsigned int action, err_mask;
+
+ dev = &ap->device[i];
+ action = ata_eh_dev_action(dev);
+
+ if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
+ continue;
+
+ ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);
+
+ if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
+ err_mask = ata_do_simple_cmd(dev,
+ ATA_CMD_IDLEIMMEDIATE);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to "
+ "spin up (err_mask=0x%x)\n",
+ err_mask);
+ rc = -EIO;
+ break;
+ }
+ }
+
+ ata_eh_done(ap, dev, ATA_EH_RESUME);
+ }
+
+ if (rc)
+ *r_failed_dev = dev;
+
+ DPRINTK("EXIT\n");
+ return 0;
+}
+
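
Editorial note: both ata_eh_suspend() and ata_eh_resume() above go through ata_do_simple_cmd(), which this patch calls but does not show. As a hedged sketch (the helper names and flag usage are assumptions based on other libata call sites, not taken from this patch), such a no-data command amounts to filling a taskfile and issuing it internally:

	/* Hedged sketch of a no-data internal command such as
	 * ATA_CMD_STANDBYNOW1 (spin down) or ATA_CMD_IDLEIMMEDIATE (spin up).
	 * Returns an AC_ERR_* mask, 0 on success. Not this patch's code. */
	static unsigned int simple_cmd_sketch(struct ata_device *dev, u8 cmd)
	{
		struct ata_taskfile tf;

		ata_tf_init(dev, &tf);
		tf.command = cmd;
		tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
		tf.protocol = ATA_PROT_NODATA;

		return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	}
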
static int ata_port_nr_enabled(struct ata_port *ap)
{
int i, cnt = 0;
@@ -1673,7 +1862,20 @@ static int ata_eh_skip_recovery(struct ata_port *ap)
struct ata_eh_context *ehc = &ap->eh_context;
int i;
- if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
+ /* skip if all possible devices are suspended */
+ for (i = 0; i < ata_port_max_devices(ap); i++) {
+ struct ata_device *dev = &ap->device[i];
+
+ if (!(dev->flags & ATA_DFLAG_SUSPENDED))
+ break;
+ }
+
+ if (i == ata_port_max_devices(ap))
+ return 1;
+
+ /* thaw frozen port, resume link and recover failed devices */
+ if ((ap->pflags & ATA_PFLAG_FROZEN) ||
+ (ehc->i.flags & ATA_EHI_RESUME_LINK) || ata_port_nr_enabled(ap))
return 0;
/* skip if class codes for all vacant slots are ATA_DEV_NONE */
@@ -1744,9 +1946,12 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
rc = 0;
/* if UNLOADING, finish immediately */
- if (ap->flags & ATA_FLAG_UNLOADING)
+ if (ap->pflags & ATA_PFLAG_UNLOADING)
goto out;
+ /* prep for resume */
+ ata_eh_prep_resume(ap);
+
/* skip EH if possible. */
if (ata_eh_skip_recovery(ap))
ehc->i.action = 0;
@@ -1774,6 +1979,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
if (rc)
goto dev_fail;
+ /* resume devices */
+ rc = ata_eh_resume(ap, &dev);
+ if (rc)
+ goto dev_fail;
+
/* configure transfer mode if the port has been reset */
if (ehc->i.flags & ATA_EHI_DID_RESET) {
rc = ata_set_mode(ap, &dev);
@@ -1783,6 +1993,11 @@ static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
}
}
+ /* suspend devices */
+ rc = ata_eh_suspend(ap, &dev);
+ if (rc)
+ goto dev_fail;
+
goto out;
dev_fail:
@@ -1908,11 +2123,124 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset)
{
- if (!(ap->flags & ATA_FLAG_LOADING)) {
- ata_eh_autopsy(ap);
- ata_eh_report(ap);
- }
-
+ ata_eh_autopsy(ap);
+ ata_eh_report(ap);
ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
ata_eh_finish(ap);
}
+
+/**
+ * ata_eh_handle_port_suspend - perform port suspend operation
+ * @ap: port to suspend
+ *
+ * Suspend @ap.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_suspend(struct ata_port *ap)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ /* are we suspending? */
+ spin_lock_irqsave(ap->lock, flags);
+ if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+ ap->pm_mesg.event == PM_EVENT_ON) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+
+ /* suspend */
+ ata_eh_freeze_port(ap);
+
+ if (ap->ops->port_suspend)
+ rc = ap->ops->port_suspend(ap, ap->pm_mesg);
+
+ /* report result */
+ spin_lock_irqsave(ap->lock, flags);
+
+ ap->pflags &= ~ATA_PFLAG_PM_PENDING;
+ if (rc == 0)
+ ap->pflags |= ATA_PFLAG_SUSPENDED;
+ else
+ ata_port_schedule_eh(ap);
+
+ if (ap->pm_result) {
+ *ap->pm_result = rc;
+ ap->pm_result = NULL;
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ return;
+}
+
+/**
+ * ata_eh_handle_port_resume - perform port resume operation
+ * @ap: port to resume
+ *
+ * Resume @ap.
+ *
+ * This function also waits up to one second until all devices
+ * hanging off this port request resume EH action. This is to
+ * prevent invoking EH and thus resetting multiple times on resume.
+ *
+ * On DPM resume, where some devices might not be resumed
+ * together, this may delay port resume by up to one second, but such
+ * DPM resumes are rare and a 1 sec delay isn't too bad.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_eh_handle_port_resume(struct ata_port *ap)
+{
+ unsigned long timeout;
+ unsigned long flags;
+ int i, rc = 0;
+
+ /* are we resuming? */
+ spin_lock_irqsave(ap->lock, flags);
+ if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
+ ap->pm_mesg.event != PM_EVENT_ON) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* spurious? */
+ if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
+ goto done;
+
+ if (ap->ops->port_resume)
+ rc = ap->ops->port_resume(ap);
+
+ /* give devices time to request EH */
+ timeout = jiffies + HZ; /* 1s max */
+ while (1) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ struct ata_device *dev = &ap->device[i];
+ unsigned int action = ata_eh_dev_action(dev);
+
+ if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
+ !(action & ATA_EH_RESUME))
+ break;
+ }
+
+ if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
+ break;
+ msleep(10);
+ }
+
+ done:
+ spin_lock_irqsave(ap->lock, flags);
+ ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
+ if (ap->pm_result) {
+ *ap->pm_result = rc;
+ ap->pm_result = NULL;
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+}
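
Editorial note: the two handlers above only consume PM requests; the requesting side (in libata-core, not part of this hunk) is expected to record the PM message and a result pointer, raise ATA_PFLAG_PM_PENDING, kick EH and wait. A hedged sketch of that caller-side contract, using only the fields and helpers visible in this patch:

	/* Sketch of the requester side implied by
	 * ata_eh_handle_port_suspend()/_resume(): not the actual
	 * libata-core code, just the protocol the handlers expect. */
	static int request_port_pm_sketch(struct ata_port *ap, pm_message_t mesg)
	{
		unsigned long flags;
		int rc = 0;

		spin_lock_irqsave(ap->lock, flags);
		ap->pm_mesg = mesg;
		ap->pm_result = &rc;
		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ata_port_schedule_eh(ap);
		spin_unlock_irqrestore(ap->lock, flags);

		/* EH writes *ap->pm_result and clears the pointer */
		ata_port_wait_eh(ap);
		return rc;
	}
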
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 2915bca691e8..e92c31d698ff 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -397,20 +397,129 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
}
}
-int ata_scsi_device_resume(struct scsi_device *sdev)
+/**
+ * ata_scsi_device_suspend - suspend ATA device associated with sdev
+ * @sdev: the SCSI device to suspend
+ * @state: target power management state
+ *
+ * Request suspend EH action on the ATA device associated with
+ * @sdev and wait for the operation to complete.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+ struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
+ unsigned long flags;
+ unsigned int action;
+ int rc = 0;
+
+ if (!dev)
+ goto out;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* wait for the previous resume to complete */
+ while (dev->flags & ATA_DFLAG_SUSPENDED) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ ata_port_wait_eh(ap);
+ spin_lock_irqsave(ap->lock, flags);
+ }
+
+ /* if @sdev is already detached, nothing to do */
+ if (sdev->sdev_state == SDEV_OFFLINE ||
+ sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
+ goto out_unlock;
+
+ /* request suspend */
+ action = ATA_EH_SUSPEND;
+ if (state.event != PM_EVENT_SUSPEND)
+ action |= ATA_EH_PM_FREEZE;
+ ap->eh_info.dev_action[dev->devno] |= action;
+ ap->eh_info.flags |= ATA_EHI_QUIET;
+ ata_port_schedule_eh(ap);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* wait for EH to do the job */
+ ata_port_wait_eh(ap);
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* If @sdev is still attached but the associated ATA device
+ * isn't suspended, the operation failed.
+ */
+ if (sdev->sdev_state != SDEV_OFFLINE &&
+ sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_DEL &&
+ !(dev->flags & ATA_DFLAG_SUSPENDED))
+ rc = -EIO;
- return ata_device_resume(dev);
+ out_unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+ out:
+ if (rc == 0)
+ sdev->sdev_gendev.power.power_state = state;
+ return rc;
}
-int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
+/**
+ * ata_scsi_device_resume - resume ATA device associated with sdev
+ * @sdev: the SCSI device to resume
+ *
+ * Request resume EH action on the ATA device associated with
+ * @sdev and return immediately. This enables parallel
+ * wakeup/spinup of devices.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0.
+ */
+int ata_scsi_device_resume(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
+ struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
+ struct ata_eh_info *ehi = &ap->eh_info;
+ unsigned long flags;
+ unsigned int action;
- return ata_device_suspend(dev, state);
+ if (!dev)
+ goto out;
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* if @sdev is already detached, nothing to do */
+ if (sdev->sdev_state == SDEV_OFFLINE ||
+ sdev->sdev_state == SDEV_CANCEL || sdev->sdev_state == SDEV_DEL)
+ goto out_unlock;
+
+ /* request resume */
+ action = ATA_EH_RESUME;
+ if (sdev->sdev_gendev.power.power_state.event == PM_EVENT_SUSPEND)
+ __ata_ehi_hotplugged(ehi);
+ else
+ action |= ATA_EH_PM_FREEZE | ATA_EH_SOFTRESET;
+ ehi->dev_action[dev->devno] |= action;
+
+ /* We don't want autopsy and verbose EH messages. Disable
+ * those if we're the only device on this link.
+ */
+ if (ata_port_max_devices(ap) == 1)
+ ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+
+ ata_port_schedule_eh(ap);
+
+ out_unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+ out:
+ sdev->sdev_gendev.power.power_state = PMSG_ON;
+ return 0;
}
/**
@@ -2244,6 +2353,19 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
ata_gen_ata_desc_sense(qc);
}
+ /* SCSI EH automatically locks the door if sdev->locked is
+ * set. Sometimes the door lock request keeps failing, for
+ * example, when no media is present. This creates a loop -
+ * SCSI EH issues the door lock, which fails, and EH gets
+ * invoked again to acquire sense data for the failed command.
+ *
+ * If the door lock fails, always clear sdev->locked to
+ * avoid this infinite loop.
+ */
+ if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+ qc->dev->sdev->locked = 0;
+
qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
qc->scsidone(cmd);
ata_qc_free(qc);
@@ -2930,7 +3052,7 @@ void ata_scsi_hotplug(void *data)
struct ata_port *ap = data;
int i;
- if (ap->flags & ATA_FLAG_UNLOADING) {
+ if (ap->pflags & ATA_PFLAG_UNLOADING) {
DPRINTK("ENTER/EXIT - unloading\n");
return;
}
@@ -3011,6 +3133,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
if (dev) {
ap->eh_info.probe_mask |= 1 << dev->devno;
ap->eh_info.action |= ATA_EH_SOFTRESET;
+ ap->eh_info.flags |= ATA_EHI_RESUME_LINK;
} else
rc = -EINVAL;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7e6e031cc41b..5884cd26d53a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session,
{
struct scsi_cmnd *sc = ctask->sc;
+ ctask->state = ISCSI_TASK_COMPLETED;
ctask->sc = NULL;
list_del_init(&ctask->running);
__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
@@ -275,6 +276,25 @@ out:
return rc;
}
+static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+ struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ conn->tmfrsp_pdus_cnt++;
+
+ if (conn->tmabort_state != TMABORT_INITIAL)
+ return;
+
+ if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+ conn->tmabort_state = TMABORT_SUCCESS;
+ else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+ conn->tmabort_state = TMABORT_NOT_FOUND;
+ else
+ conn->tmabort_state = TMABORT_FAILED;
+ wake_up(&conn->ehwait);
+}
+
/**
* __iscsi_complete_pdu - complete pdu
* @conn: iscsi conn
@@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
switch(opcode) {
case ISCSI_OP_LOGOUT_RSP:
+ if (datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
/* fall through */
case ISCSI_OP_LOGIN_RSP:
@@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* login related PDU's exp_statsn is handled in
* userspace
*/
- rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
list_del(&mtask->running);
if (conn->login_mtask != mtask)
__kfifo_put(session->mgmtpool.queue,
@@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
break;
}
- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
- conn->tmfrsp_pdus_cnt++;
- if (conn->tmabort_state == TMABORT_INITIAL) {
- conn->tmabort_state =
- ((struct iscsi_tm_rsp *)hdr)->
- response == ISCSI_TMF_RSP_COMPLETE ?
- TMABORT_SUCCESS:TMABORT_FAILED;
- /* unblock eh_abort() */
- wake_up(&conn->ehwait);
- }
+ iscsi_tmf_rsp(conn, hdr);
break;
case ISCSI_OP_NOOP_IN:
- if (hdr->ttt != ISCSI_RESERVED_TAG) {
+ if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) {
rc = ISCSI_ERR_PROTO;
break;
}
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
- rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
list_del(&mtask->running);
if (conn->login_mtask != mtask)
__kfifo_put(session->mgmtpool.queue,
@@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else if (itt == ISCSI_RESERVED_TAG) {
switch(opcode) {
case ISCSI_OP_NOOP_IN:
- if (!datalen) {
- rc = iscsi_check_assign_cmdsn(session,
- (struct iscsi_nopin*)hdr);
- if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
- rc = iscsi_recv_pdu(conn->cls_conn,
- hdr, NULL, 0);
- } else
+ if (datalen) {
rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+ rc = iscsi_check_assign_cmdsn(session,
+ (struct iscsi_nopin*)hdr);
+ if (rc)
+ break;
+
+ if (hdr->ttt == ISCSI_RESERVED_TAG)
+ break;
+
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
+ rc = ISCSI_ERR_CONN_FAILED;
break;
case ISCSI_OP_REJECT:
/* we need sth like iscsi_reject_rsp()*/
@@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
}
/* process command queue */
- while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
- sizeof(void*))) {
+ spin_lock_bh(&conn->session->lock);
+ while (!list_empty(&conn->xmitqueue)) {
/*
* iscsi tcp may re-add the task to the xmitqueue to send
* write data
*/
- spin_lock_bh(&conn->session->lock);
- if (list_empty(&conn->ctask->running))
- list_add_tail(&conn->ctask->running, &conn->run_list);
+ conn->ctask = list_entry(conn->xmitqueue.next,
+ struct iscsi_cmd_task, running);
+ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->xmitqueue.next, &conn->run_list);
spin_unlock_bh(&conn->session->lock);
+
rc = tt->xmit_cmd_task(conn, conn->ctask);
if (rc)
goto again;
+ spin_lock_bh(&conn->session->lock);
}
+ spin_unlock_bh(&conn->session->lock);
/* done with this ctask */
conn->ctask = NULL;
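
Editorial note: with the kfifo replaced by a plain list_head, each command task now has an explicit lifecycle — queued on conn->xmitqueue as ISCSI_TASK_PENDING, moved to conn->run_list as ISCSI_TASK_RUNNING, and marked ISCSI_TASK_COMPLETED in iscsi_complete_command(). A standalone sketch of the pending-to-running move (toy types, not the libiscsi structures):

	#include <linux/list.h>

	/* Toy version of the list_move_tail() bookkeeping above. */
	enum toy_state { TOY_PENDING, TOY_RUNNING, TOY_COMPLETED };

	struct toy_ctask {
		enum toy_state state;
		struct list_head running;	/* on xmitqueue or run_list */
	};

	static struct toy_ctask *toy_next_pending(struct list_head *xmitqueue,
						  struct list_head *run_list)
	{
		struct toy_ctask *ctask;

		if (list_empty(xmitqueue))
			return NULL;

		ctask = list_entry(xmitqueue->next, struct toy_ctask, running);
		ctask->state = TOY_RUNNING;
		list_move_tail(xmitqueue->next, run_list);
		return ctask;
	}
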
@@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
sc->SCp.phase = session->age;
sc->SCp.ptr = (char *)ctask;
+ ctask->state = ISCSI_TASK_PENDING;
ctask->mtask = NULL;
ctask->conn = conn;
ctask->sc = sc;
@@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
session->tt->init_cmd_task(ctask);
- __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
+ list_add_tail(&ctask->running, &conn->xmitqueue);
debug_scsi(
"ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
@@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
/*
* xmit mutex and session lock must be held
*/
-#define iscsi_remove_task(tasktype) \
-static struct iscsi_##tasktype * \
-iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \
-{ \
- int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \
- struct iscsi_##tasktype *task; \
- \
- debug_scsi("searching %d tasks\n", nr_tasks); \
- \
- for (i = 0; i < nr_tasks; i++) { \
- __kfifo_get(fifo, (void*)&task, sizeof(void*)); \
- debug_scsi("check task %u\n", task->itt); \
- \
- if (task->itt == itt) { \
- debug_scsi("matched task\n"); \
- return task; \
- } \
- \
- __kfifo_put(fifo, (void*)&task, sizeof(void*)); \
- } \
- return NULL; \
-}
+static struct iscsi_mgmt_task *
+iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+{
+ int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
+ struct iscsi_mgmt_task *task;
-iscsi_remove_task(mgmt_task);
-iscsi_remove_task(cmd_task);
+ debug_scsi("searching %d tasks\n", nr_tasks);
+
+ for (i = 0; i < nr_tasks; i++) {
+ __kfifo_get(fifo, (void*)&task, sizeof(void*));
+ debug_scsi("check task %u\n", task->itt);
+
+ if (task->itt == itt) {
+ debug_scsi("matched task\n");
+ return task;
+ }
+
+ __kfifo_put(fifo, (void*)&task, sizeof(void*));
+ }
+ return NULL;
+}
static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
{
@@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
{
struct scsi_cmnd *sc;
- conn->session->tt->cleanup_cmd_task(conn, ctask);
- iscsi_ctask_mtask_cleanup(ctask);
-
sc = ctask->sc;
if (!sc)
return;
+
+ conn->session->tt->cleanup_cmd_task(conn, ctask);
+ iscsi_ctask_mtask_cleanup(ctask);
+
sc->result = err;
sc->resid = sc->request_bufflen;
iscsi_complete_command(conn->session, ctask);
@@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
struct iscsi_conn *conn = ctask->conn;
struct iscsi_session *session = conn->session;
- struct iscsi_cmd_task *pending_ctask;
int rc;
conn->eh_abort_cnt++;
@@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
goto failed;
/* ctask completed before time out */
- if (!ctask->sc)
- goto success;
+ if (!ctask->sc) {
+ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+ goto success_rel_mutex;
+ }
/* what should we do here ? */
if (conn->ctask == ctask) {
@@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
goto failed;
}
- /* check for the easy pending cmd abort */
- pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt);
- if (pending_ctask) {
- /* iscsi_tcp queues write transfers on the xmitqueue */
- if (list_empty(&pending_ctask->running)) {
- debug_scsi("found pending task\n");
- goto success;
- } else
- __kfifo_put(conn->xmitqueue, (void*)&pending_ctask,
- sizeof(void*));
- }
+ if (ctask->state == ISCSI_TASK_PENDING)
+ goto success_cleanup;
conn->tmabort_state = TMABORT_INITIAL;
@@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
rc = iscsi_exec_abort_task(sc, ctask);
spin_lock_bh(&session->lock);
- iscsi_ctask_mtask_cleanup(ctask);
if (rc || sc->SCp.phase != session->age ||
session->state != ISCSI_STATE_LOGGED_IN)
goto failed;
+ iscsi_ctask_mtask_cleanup(ctask);
- /* ctask completed before tmf abort response */
- if (!ctask->sc) {
- debug_scsi("sc completed while abort in progress\n");
- goto success;
- }
-
- if (conn->tmabort_state != TMABORT_SUCCESS) {
+ switch (conn->tmabort_state) {
+ case TMABORT_SUCCESS:
+ goto success_cleanup;
+ case TMABORT_NOT_FOUND:
+ if (!ctask->sc) {
+ /* ctask completed before tmf abort response */
+ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+ goto success_rel_mutex;
+ }
+ /* fall through */
+ default:
+ /* timed out or failed */
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
spin_lock_bh(&session->lock);
goto failed;
}
-success:
+success_cleanup:
debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
spin_unlock_bh(&session->lock);
@@ -1121,6 +1146,7 @@ success:
spin_unlock(&session->lock);
write_unlock_bh(conn->recv_lock);
+success_rel_mutex:
mutex_unlock(&conn->xmitmutex);
return SUCCESS;
@@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
if (cmd_task_size)
ctask->dd_data = &ctask[1];
ctask->itt = cmd_i;
+ INIT_LIST_HEAD(&ctask->running);
}
spin_lock_init(&session->lock);
@@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
if (mgmt_task_size)
mtask->dd_data = &mtask[1];
mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
+ INIT_LIST_HEAD(&mtask->running);
}
if (scsi_add_host(shost, NULL))
@@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
scsi_remove_host(shost);
iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ kfree(session->targetname);
+
iscsi_destroy_session(cls_session);
scsi_host_put(shost);
- module_put(cls_session->transport->owner);
+ module_put(owner);
}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
conn->tmabort_state = TMABORT_INITIAL;
INIT_LIST_HEAD(&conn->run_list);
INIT_LIST_HEAD(&conn->mgmt_run_list);
-
- /* initialize general xmit PDU commands queue */
- conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (conn->xmitqueue == ERR_PTR(-ENOMEM))
- goto xmitqueue_alloc_fail;
+ INIT_LIST_HEAD(&conn->xmitqueue);
/* initialize general immediate & non-immediate PDU commands queue */
conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
@@ -1394,7 +1420,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
if (!data)
goto login_mtask_data_alloc_fail;
- conn->login_mtask->data = data;
+ conn->login_mtask->data = conn->data = data;
init_timer(&conn->tmabort_timer);
mutex_init(&conn->xmitmutex);
@@ -1410,8 +1436,6 @@ login_mtask_alloc_fail:
mgmtqueue_alloc_fail:
kfifo_free(conn->immqueue);
immqueue_alloc_fail:
- kfifo_free(conn->xmitqueue);
-xmitqueue_alloc_fail:
iscsi_destroy_conn(cls_conn);
return NULL;
}
@@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
mutex_lock(&conn->xmitmutex);
- if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) {
- if (session->tt->suspend_conn_recv)
- session->tt->suspend_conn_recv(conn);
-
- session->tt->terminate_conn(conn);
- }
spin_lock_bh(&session->lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
@@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_lock_bh(&session->lock);
- kfree(conn->login_mtask->data);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
sizeof(void*));
list_del(&conn->item);
@@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
spin_unlock_bh(&session->lock);
- kfifo_free(conn->xmitqueue);
kfifo_free(conn->immqueue);
kfifo_free(conn->mgmtqueue);
@@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn)
struct iscsi_cmd_task *ctask, *tmp;
/* flush pending */
- while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) {
+ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
ctask->itt);
fail_command(conn, ctask, DID_BUS_BUSY << 16);
@@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
spin_unlock_bh(&session->lock);
- if (session->tt->suspend_conn_recv)
- session->tt->suspend_conn_recv(conn);
+ write_lock_bh(conn->recv_lock);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ write_unlock_bh(conn->recv_lock);
mutex_lock(&conn->xmitmutex);
/*
@@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
}
}
- session->tt->terminate_conn(conn);
/*
* flush queues.
*/
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f81691fcf177..d44f9aac6b8f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -21,10 +21,12 @@
struct lpfc_sli2_slim;
-#define LPFC_MAX_TARGET 256 /* max targets supported */
-#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els req */
-#define LPFC_MAX_NS_RETRY 3 /* max NameServer retries */
+#define LPFC_MAX_TARGET 256 /* max number of targets supported */
+#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
+ requests */
+#define LPFC_MAX_NS_RETRY 3 /* Number of retry attempts to contact
+ the NameServer before giving up. */
#define LPFC_DFT_HBA_Q_DEPTH 2048 /* max cmds per hba */
#define LPFC_LC_HBA_Q_DEPTH 1024 /* max cmds per low cost hba */
#define LPFC_LP101_HBA_Q_DEPTH 128 /* max cmds per low cost hba */
@@ -41,7 +43,6 @@ struct lpfc_sli2_slim;
(( (u64)(high)<<16 ) << 16)|( (u64)(low))))
/* Provide maximum configuration definitions. */
#define LPFC_DRVR_TIMEOUT 16 /* driver iocb timeout value in sec */
-#define MAX_FCP_TARGET 256 /* max num of FCP targets supported */
#define FC_MAX_ADPTMSG 64
#define MAX_HBAEVT 32
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b62a72dfab29..d384c16f4a87 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -219,8 +219,18 @@ lpfc_issue_lip(struct Scsi_Host *host)
return -ENOMEM;
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
- lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed);
- mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
+ pmboxq->mb.mbxOwner = OWN_HOST;
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
+
+ if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+ memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
+ lpfc_init_link(phba, pmboxq, phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
+ phba->fc_ratov * 2);
+ }
if (mbxstatus == MBX_TIMEOUT)
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -233,51 +243,53 @@ lpfc_issue_lip(struct Scsi_Host *host)
return 0;
}
-static ssize_t
-lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
+static int
+lpfc_selective_reset(struct lpfc_hba *phba)
{
- struct Scsi_Host *host = class_to_shost(cdev);
- struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
- return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+ struct completion online_compl;
+ int status = 0;
+
+ init_completion(&online_compl);
+ lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_OFFLINE);
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ init_completion(&online_compl);
+ lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ wait_for_completion(&online_compl);
+
+ if (status != 0)
+ return -EIO;
+
+ return 0;
}
static ssize_t
-lpfc_board_online_show(struct class_device *cdev, char *buf)
+lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+ int status = -EINVAL;
- if (phba->fc_flag & FC_OFFLINE_MODE)
- return snprintf(buf, PAGE_SIZE, "0\n");
+ if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
+ status = lpfc_selective_reset(phba);
+
+ if (status == 0)
+ return strlen(buf);
else
- return snprintf(buf, PAGE_SIZE, "1\n");
+ return status;
}
static ssize_t
-lpfc_board_online_store(struct class_device *cdev, const char *buf,
- size_t count)
+lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
{
struct Scsi_Host *host = class_to_shost(cdev);
struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
- struct completion online_compl;
- int val=0, status=0;
-
- if (sscanf(buf, "%d", &val) != 1)
- return -EINVAL;
-
- init_completion(&online_compl);
-
- if (val)
- lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- else
- lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_OFFLINE);
- wait_for_completion(&online_compl);
- if (!status)
- return strlen(buf);
- else
- return -EIO;
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
}
static ssize_t
@@ -532,10 +544,9 @@ static CLASS_DEVICE_ATTR(lpfc_drvr_version, S_IRUGO, lpfc_drvr_version_show,
NULL);
static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
NULL);
-static CLASS_DEVICE_ATTR(board_online, S_IRUGO | S_IWUSR,
- lpfc_board_online_show, lpfc_board_online_store);
static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
lpfc_board_mode_show, lpfc_board_mode_store);
+static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
static int lpfc_poll = 0;
module_param(lpfc_poll, int, 0);
@@ -695,12 +706,12 @@ LPFC_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
"during discovery");
/*
-# lpfc_max_luns: maximum number of LUNs per target driver will support
-# Value range is [1,32768]. Default value is 256.
-# NOTE: The SCSI layer will scan each target for this many luns
+# lpfc_max_luns: maximum allowed LUN.
+# Value range is [0,65535]. Default value is 255.
+# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
*/
-LPFC_ATTR_R(max_luns, 256, 1, 32768,
- "Maximum number of LUNs per target driver will support");
+LPFC_ATTR_R(max_luns, 255, 0, 65535,
+ "Maximum allowed LUN");
/*
# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
@@ -739,8 +750,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
&class_device_attr_lpfc_max_luns,
&class_device_attr_nport_evt_cnt,
&class_device_attr_management_version,
- &class_device_attr_board_online,
&class_device_attr_board_mode,
+ &class_device_attr_issue_reset,
&class_device_attr_lpfc_poll,
&class_device_attr_lpfc_poll_tmo,
NULL,
@@ -873,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
phba->sysfs_mbox.mbox == NULL ) {
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
- return -EINVAL;
+ return -EAGAIN;
}
}
@@ -989,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
spin_unlock_irq(phba->host->host_lock);
rc = lpfc_sli_issue_mbox_wait (phba,
phba->sysfs_mbox.mbox,
- phba->fc_ratov * 2);
+ lpfc_mbox_tmo_val(phba,
+ phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
spin_lock_irq(phba->host->host_lock);
}
if (rc != MBX_SUCCESS) {
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
- return -ENODEV;
+ return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
}
phba->sysfs_mbox.state = SMBOX_READING;
}
@@ -1005,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
printk(KERN_WARNING "mbox_read: Bad State\n");
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
- return -EINVAL;
+ return -EAGAIN;
}
memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
@@ -1199,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
struct lpfc_sli *psli = &phba->sli;
struct fc_host_statistics *hs = &phba->link_stats;
+ struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
+ unsigned long seconds;
int rc = 0;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1261,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
hs->error_frames = pmb->un.varRdLnk.crcCnt;
+ hs->link_failure_count -= lso->link_failure_count;
+ hs->loss_of_sync_count -= lso->loss_of_sync_count;
+ hs->loss_of_signal_count -= lso->loss_of_signal_count;
+ hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
+ hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
+ hs->invalid_crc_count -= lso->invalid_crc_count;
+ hs->error_frames -= lso->error_frames;
+
if (phba->fc_topology == TOPOLOGY_LOOP) {
hs->lip_count = (phba->fc_eventTag >> 1);
+ hs->lip_count -= lso->link_events;
hs->nos_count = -1;
} else {
hs->lip_count = -1;
hs->nos_count = (phba->fc_eventTag >> 1);
+ hs->nos_count -= lso->link_events;
}
hs->dumped_frames = -1;
-/* FIX ME */
- /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
+ seconds = get_seconds();
+ if (seconds < psli->stats_start)
+ hs->seconds_since_last_reset = seconds +
+ ((unsigned long)-1 - psli->stats_start);
+ else
+ hs->seconds_since_last_reset = seconds - psli->stats_start;
return hs;
}
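
Editorial note: get_seconds() returns an unsigned long that can wrap, so lpfc_get_stats() above computes the time since the last reset against the recorded psli->stats_start. The same arithmetic as a standalone sketch:

	/* Wraparound-aware "seconds since start", mirroring the
	 * stats_start handling above. Illustration only. */
	static unsigned long seconds_since(unsigned long now, unsigned long start)
	{
		if (now < start)	/* counter wrapped since start */
			return now + ((unsigned long)-1 - start);
		return now - start;
	}
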
+static void
+lpfc_reset_stats(struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ int rc = 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return;
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->mb;
+ pmb->mbxCommand = MBX_READ_STATUS;
+ pmb->mbxOwner = OWN_HOST;
+ pmb->un.varWords[0] = 0x1; /* reset request */
+ pmboxq->context1 = NULL;
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc == MBX_TIMEOUT)
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ else
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb->mbxCommand = MBX_READ_LNK_STAT;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc == MBX_TIMEOUT)
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ else
+ mempool_free( pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+ lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+ lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+ lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+ lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+ lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+ lso->error_frames = pmb->un.varRdLnk.crcCnt;
+ lso->link_events = (phba->fc_eventTag >> 1);
+
+ psli->stats_start = get_seconds();
+
+ return;
+}
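
Editorial note: lpfc_reset_stats() cannot clear the HBA's link counters, so it "resets" them by snapshotting the current raw values into lnk_stat_offsets; lpfc_get_stats() then subtracts those offsets, as the earlier hunk shows. The idea in a standalone sketch (names are illustrative, not lpfc structures):

	/* Reset-by-offset: a monotonically increasing hardware counter is
	 * "reset" by recording its current value; reads subtract it. */
	struct soft_counter {
		unsigned long raw;	/* value read from hardware */
		unsigned long offset;	/* snapshot taken at last reset */
	};

	static void soft_counter_reset(struct soft_counter *c)
	{
		c->offset = c->raw;
	}

	static unsigned long soft_counter_read(const struct soft_counter *c)
	{
		return c->raw - c->offset;
	}
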
/*
* The LPFC driver treats linkdown handling as target loss events so there
@@ -1420,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = {
*/
.get_fc_host_stats = lpfc_get_stats,
-
- /* the LPFC driver doesn't support resetting stats yet */
+ .reset_fc_host_stats = lpfc_reset_stats,
.dd_fcrport_size = sizeof(struct lpfc_rport_data),
.show_rport_maxframe_size = 1,
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index ee22173fce43..2a176467f71b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
int lpfc_mem_alloc(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
@@ -147,6 +148,7 @@ int lpfc_sli_hba_setup(struct lpfc_hba *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
+int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index b65ee57af53e..bbb7310210b0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
}
ct_unsol_event_exit_piocbq:
+ list_del(&head);
if (pmbuf) {
list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
lpfc_mbuf_free(phba, matp->virt, matp->phys);
@@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0239 NameServer Rsp "
+ "%d:0208 NameServer Rsp "
"Data: x%x\n",
phba->brd_no,
phba->fc_flag);
@@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
lpfc_decode_firmware_rev(phba, fwrev, 0);
- if (phba->Port[0]) {
- sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
- phba->Port, fwrev, lpfc_release_version);
- } else {
- sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
- fwrev, lpfc_release_version);
- }
+ sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
+ fwrev, lpfc_release_version);
+ return;
}
/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4126fd87956f..3567de613162 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -648,33 +648,32 @@ lpfc_more_plogi(struct lpfc_hba * phba)
}
static struct lpfc_nodelist *
-lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
struct lpfc_nodelist *ndlp)
{
struct lpfc_nodelist *new_ndlp;
- struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
struct serv_parm *sp;
uint8_t name[sizeof (struct lpfc_name)];
uint32_t rc;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- prsp = (struct lpfc_dmabuf *) pcmd->list.next;
lp = (uint32_t *) prsp->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+ memset(name, 0, sizeof (struct lpfc_name));
/* Now we to find out if the NPort we are logging into, matches the WWPN
* we have for that ndlp. If not, we have some work to do.
*/
new_ndlp = lpfc_findnode_wwpn(phba, NLP_SEARCH_ALL, &sp->portName);
- memset(name, 0, sizeof (struct lpfc_name));
- rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
- if (!rc || (new_ndlp == ndlp)) {
+ if (new_ndlp == ndlp)
return ndlp;
- }
if (!new_ndlp) {
+ rc =
+ memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
+ if (!rc)
+ return ndlp;
new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
if (!new_ndlp)
return ndlp;
@@ -683,17 +682,21 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
lpfc_unreg_rpi(phba, new_ndlp);
- new_ndlp->nlp_prev_state = ndlp->nlp_state;
new_ndlp->nlp_DID = ndlp->nlp_DID;
- new_ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
- lpfc_nlp_list(phba, new_ndlp, NLP_PLOGI_LIST);
+ new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
+ new_ndlp->nlp_state = ndlp->nlp_state;
+ lpfc_nlp_list(phba, new_ndlp, ndlp->nlp_flag & NLP_LIST_MASK);
/* Move this back to NPR list */
- lpfc_unreg_rpi(phba, ndlp);
- ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
- ndlp->nlp_state = NLP_STE_NPR_NODE;
- lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
-
+ if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ }
+ else {
+ lpfc_unreg_rpi(phba, ndlp);
+ ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
+ ndlp->nlp_state = NLP_STE_NPR_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
+ }
return new_ndlp;
}
@@ -703,6 +706,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
{
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
+ struct lpfc_dmabuf *prsp;
int disc, rc, did, type;
@@ -769,7 +773,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
}
} else {
/* Good status, call state machine */
- ndlp = lpfc_plogi_confirm_nport(phba, cmdiocb, ndlp);
+ prsp = list_entry(((struct lpfc_dmabuf *)
+ cmdiocb->context2)->list.next,
+ struct lpfc_dmabuf, list);
+ ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
}
@@ -1841,9 +1848,12 @@ static void
lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
{
+ IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
LPFC_MBOXQ_t *mbox = NULL;
+ irsp = &rspiocb->iocb;
+
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -1886,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
mempool_free( mbox, phba->mbox_mem_pool);
} else {
mempool_free( mbox, phba->mbox_mem_pool);
- if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
- lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
- ndlp = NULL;
+ /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
+ if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ ndlp = NULL;
+ }
}
}
}
@@ -2832,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* Xmit ELS RPS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0128 Xmit ELS RPS ACC response tag x%x "
+ "%d:0118 Xmit ELS RPS ACC response tag x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no,
elsiocb->iocb.ulpIoTag,
@@ -2941,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
/* Xmit ELS RPL ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0128 Xmit ELS RPL ACC response tag x%x "
+ "%d:0120 Xmit ELS RPL ACC response tag x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no,
elsiocb->iocb.ulpIoTag,
@@ -3102,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_nodelist *ndlp, *next_ndlp;
/* FAN received */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n",
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
phba->brd_no);
icmd = &cmdiocb->iocb;
@@ -3282,10 +3298,9 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
} else
lpfc_sli_release_iocbq(phba, piocb);
}
- if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) {
- phba->els_tmofunc.expires = jiffies + HZ * timeout;
- add_timer(&phba->els_tmofunc);
- }
+ if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+ mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
+
spin_unlock_irq(phba->host->host_lock);
}
@@ -3442,6 +3457,8 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
ndlp->nlp_type |= NLP_FABRIC;
}
+ ndlp->nlp_state = NLP_STE_UNUSED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
}
phba->fc_stat.elsRcvFrame++;
@@ -3463,13 +3480,14 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
rjt_err = 1;
break;
}
+ ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
break;
case ELS_CMD_FLOGI:
phba->fc_stat.elsRcvFLOGI++;
lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
if (newnode) {
- mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
break;
case ELS_CMD_LOGO:
@@ -3492,7 +3510,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
phba->fc_stat.elsRcvRSCN++;
lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
if (newnode) {
- mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
break;
case ELS_CMD_ADISC:
@@ -3535,28 +3553,28 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
phba->fc_stat.elsRcvLIRR++;
lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
if (newnode) {
- mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
break;
case ELS_CMD_RPS:
phba->fc_stat.elsRcvRPS++;
lpfc_els_rcv_rps(phba, elsiocb, ndlp);
if (newnode) {
- mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
break;
case ELS_CMD_RPL:
phba->fc_stat.elsRcvRPL++;
lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
if (newnode) {
- mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
break;
case ELS_CMD_RNID:
phba->fc_stat.elsRcvRNID++;
lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
if (newnode) {
- mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
break;
default:
@@ -3568,7 +3586,7 @@ lpfc_els_unsol_event(struct lpfc_hba * phba,
"%d:0115 Unknown ELS command x%x received from "
"NPORT x%x\n", phba->brd_no, cmd, did);
if (newnode) {
- mempool_free( ndlp, phba->nlp_mem_pool);
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
}
break;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index adb086009ae0..b2f1552f1848 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1084,7 +1084,7 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
fc_remote_port_rolechg(rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
- (rport->scsi_target_id < MAX_FCP_TARGET)) {
+ (rport->scsi_target_id < LPFC_MAX_TARGET)) {
ndlp->nlp_sid = rport->scsi_target_id;
}
@@ -1313,7 +1313,7 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
if ((rport_add == mapped) &&
((!nlp->rport) ||
(nlp->rport->scsi_target_id == -1) ||
- (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
+ (nlp->rport->scsi_target_id >= LPFC_MAX_TARGET))) {
nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
spin_lock_irq(phba->host->host_lock);
nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
@@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
+
+ spin_lock_irq(phba->host->host_lock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
mempool_free(mb, phba->mbox_mem_pool);
}
}
+ spin_unlock_irq(phba->host->host_lock);
lpfc_els_abort(phba,ndlp,0);
spin_lock_irq(phba->host->host_lock);
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to REGLOGIN */
/* FIND node DID reglogin */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID reglogin"
+ "%d:0901 FIND node DID reglogin"
" Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to PRLI */
/* FIND node DID prli */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID prli "
+ "%d:0902 FIND node DID prli "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to NPR */
/* FIND node DID npr */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID npr "
+ "%d:0903 FIND node DID npr "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to UNUSED */
/* FIND node DID unused */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID unused "
+ "%d:0905 FIND node DID unused "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0226 Device Discovery "
+ "%d:0206 Device Discovery "
"completion error\n",
phba->brd_no);
phba->hba_state = LPFC_HBA_ERROR;
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
if (!clearlambox) {
clrlaerr = 1;
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0226 Device Discovery "
+ "%d:0207 Device Discovery "
"completion error\n",
phba->brd_no);
phba->hba_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 81755a3f7c68..f6948ffe689a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -71,6 +71,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
uint16_t offset = 0;
static char licensed[56] =
"key unlock for use with gnu public licensed code only\0";
+ static int init_key = 1;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
@@ -82,10 +83,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
phba->hba_state = LPFC_INIT_MBX_CMDS;
if (lpfc_is_LC_HBA(phba->pcidev->device)) {
- uint32_t *ptext = (uint32_t *) licensed;
+ if (init_key) {
+ uint32_t *ptext = (uint32_t *) licensed;
- for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
- *ptext = cpu_to_be32(*ptext);
+ for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
+ *ptext = cpu_to_be32(*ptext);
+ init_key = 0;
+ }
lpfc_read_nv(phba, pmb);
memset((char*)mb->un.varRDnvp.rsvd3, 0,
@@ -405,19 +409,26 @@ lpfc_config_port_post(struct lpfc_hba * phba)
}
/* MBOX buffer will be freed in mbox compl */
- i = 0;
+ return (0);
+}
+
+static int
+lpfc_discovery_wait(struct lpfc_hba *phba)
+{
+ int i = 0;
+
while ((phba->hba_state != LPFC_HBA_READY) ||
(phba->num_disc_nodes) || (phba->fc_prli_sent) ||
((phba->fc_map_cnt == 0) && (i<2)) ||
- (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
+ (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE)) {
/* Check every second for 30 retries. */
i++;
if (i > 30) {
- break;
+ return -ETIMEDOUT;
}
if ((i >= 15) && (phba->hba_state <= LPFC_LINK_DOWN)) {
/* The link is down. Set linkdown timeout */
- break;
+ return -ETIMEDOUT;
}
/* Delay for 1 second to give discovery time to complete. */
@@ -425,12 +436,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
}
- /* Since num_disc_nodes keys off of PLOGI, delay a bit to let
- * any potential PRLIs to flush thru the SLI sub-system.
- */
- msleep(50);
-
- return (0);
+ return 0;
}
/************************************************************************/
@@ -1339,7 +1345,8 @@ lpfc_offline(struct lpfc_hba * phba)
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
unsigned long iflag;
- int i = 0;
+ int i;
+ int cnt = 0;
if (!phba)
return 0;
@@ -1348,20 +1355,31 @@ lpfc_offline(struct lpfc_hba * phba)
return 0;
psli = &phba->sli;
- pring = &psli->ring[psli->fcp_ring];
lpfc_linkdown(phba);
+ lpfc_sli_flush_mbox_queue(phba);
- /* The linkdown event takes 30 seconds to timeout. */
- while (pring->txcmplq_cnt) {
- mdelay(10);
- if (i++ > 3000)
- break;
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ /* The linkdown event takes 30 seconds to timeout. */
+ while (pring->txcmplq_cnt) {
+ mdelay(10);
+ if (cnt++ > 3000) {
+ lpfc_printf_log(phba,
+ KERN_WARNING, LOG_INIT,
+ "%d:0466 Outstanding IO when "
+ "bringing Adapter offline\n",
+ phba->brd_no);
+ break;
+ }
+ }
}
+
/* stop all timers associated with this hba */
lpfc_stop_timer(phba);
phba->work_hba_events = 0;
+ phba->work_ha = 0;
lpfc_printf_log(phba,
KERN_WARNING,
@@ -1599,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_iocbq;
}
- /* We can rely on a queue depth attribute only after SLI HBA setup */
+ /*
+ * Set initial can_queue value since 0 is no longer supported and
+ * scsi_add_host will fail. This will be adjusted later based on the
+ * max xri value determined in hba setup.
+ */
host->can_queue = phba->cfg_hba_queue_depth - 10;
/* Tell the midlayer we support 16 byte commands */
@@ -1639,6 +1661,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_irq;
}
+ /*
+ * hba setup may have changed the hba_queue_depth so we need to adjust
+ * the value of can_queue.
+ */
+ host->can_queue = phba->cfg_hba_queue_depth - 10;
+
+ lpfc_discovery_wait(phba);
+
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
spin_lock_irq(phba->host->host_lock);
lpfc_poll_start_timer(phba);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e42f22aaf71b..4d016c2a1b26 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba)
return mbq;
}
+
+int
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
+{
+ switch (cmd) {
+ case MBX_WRITE_NV: /* 0x03 */
+ case MBX_UPDATE_CFG: /* 0x1B */
+ case MBX_DOWN_LOAD: /* 0x1C */
+ case MBX_DEL_LD_ENTRY: /* 0x1D */
+ case MBX_LOAD_AREA: /* 0x81 */
+ case MBX_FLASH_WR_ULA: /* 0x98 */
+ case MBX_LOAD_EXP_ROM: /* 0x9C */
+ return LPFC_MBOX_TMO_FLASH_CMD;
+ }
+ return LPFC_MBOX_TMO;
+}
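
Editorial note: lpfc_mbox_tmo_val() returns the timeout in seconds, so call sites multiply by HZ before passing it to lpfc_sli_issue_mbox_wait(), as the sysfs_mbox_read() hunk above does. A hypothetical convenience wrapper (not part of the patch) illustrating the conversion:

	/* Hypothetical helper, for illustration only: per-command mailbox
	 * timeout converted from seconds to jiffies. */
	static unsigned long lpfc_mbox_tmo_jiffies(struct lpfc_hba *phba, int cmd)
	{
		return lpfc_mbox_tmo_val(phba, cmd) * HZ;
	}
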
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 07017658ac56..066292d3995a 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -133,6 +133,11 @@ lpfc_mem_free(struct lpfc_hba * phba)
pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
pci_pool_destroy(phba->lpfc_mbuf_pool);
+
+ /* Free the iocb lookup array */
+ kfree(psli->iocbq_lookup);
+ psli->iocbq_lookup = NULL;
+
}
void *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27d60ad897cd..20449a8dd53d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0201 Abort outstanding I/O on NPort x%x "
+ "%d:0205 Abort outstanding I/O on NPort x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
mbox->context2 = ndlp;
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+ /*
+ * If there is an outstanding PLOGI issued, abort it before
+ * sending the ACC rsp for the received PLOGI. If the pending
+ * PLOGI is not canceled here, it will be rejected by the
+ * remote port and retried. On a configuration with a
+ * single discovery thread, this will cause a huge delay in
+ * discovery. It will also cause multiple state machines
+ * to run in parallel for this node.
+ */
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+ }
+
lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
return 1;
@@ -1110,6 +1124,17 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
phba->brd_no,
did, mb->mbxStatus, phba->hba_state);
+ /*
+ * If RegLogin failed due to lack of HBA resources, do not
+ * retry discovery.
+ */
+ if (mb->mbxStatus == MBXERR_RPI_FULL) {
+ ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
+ ndlp->nlp_state = NLP_STE_UNUSED_NODE;
+ lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
+ return ndlp->nlp_state;
+ }
+
/* Put ndlp in npr list set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
spin_lock_irq(phba->host->host_lock);
@@ -1590,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
lpfc_rcv_padisc(phba, ndlp, cmdiocb);
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+	/*
+	 * Do not start discovery if discovery is about to start
+	 * or is already in progress for this node. Starting discovery
+	 * here will affect the counting of discovery threads.
+	 */
+ if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
+ (ndlp->nlp_flag & NLP_NPR_2B_DISC)){
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index aea1ee472f3d..a8816a8738f8 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -153,22 +154,6 @@ static void
lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
unsigned long iflag = 0;
- /*
- * There are only two special cases to consider. (1) the scsi command
- * requested scatter-gather usage or (2) the scsi command allocated
- * a request buffer, but did not request use_sg. There is a third
- * case, but it does not require resource deallocation.
- */
- if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
- dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
- psb->seg_cnt, psb->pCmd->sc_data_direction);
- } else {
- if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
- dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
- psb->pCmd->request_bufflen,
- psb->pCmd->sc_data_direction);
- }
- }
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
psb->pCmd = NULL;
@@ -282,6 +267,27 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
}
static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+{
+ /*
+ * There are only two special cases to consider. (1) the scsi command
+ * requested scatter-gather usage or (2) the scsi command allocated
+ * a request buffer, but did not request use_sg. There is a third
+ * case, but it does not require resource deallocation.
+ */
+ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
+ dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
+ psb->seg_cnt, psb->pCmd->sc_data_direction);
+ } else {
+ if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
+ dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
+ psb->pCmd->request_bufflen,
+ psb->pCmd->sc_data_direction);
+ }
+ }
+}
+
+static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
@@ -454,6 +460,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->scsi_done(cmd);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
@@ -511,6 +518,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
}
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
}
@@ -609,6 +617,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd,
+ unsigned int lun,
uint8_t task_mgmt_cmd)
{
struct lpfc_sli *psli;
@@ -627,8 +636,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd;
- int_to_scsilun(lpfc_cmd->pCmd->device->lun,
- &lpfc_cmd->fcp_cmnd->fcp_lun);
+ int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
@@ -655,14 +663,16 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
- unsigned tgt_id, struct lpfc_rport_data *rdata)
+ unsigned tgt_id, unsigned int lun,
+ struct lpfc_rport_data *rdata)
{
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
int ret;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
+ FCP_TARGET_RESET);
if (!ret)
return FAILED;
@@ -822,6 +832,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
out_host_busy_free_buf:
+ lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
lpfc_release_scsi_buf(phba, lpfc_cmd);
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -831,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
}
+static void
+lpfc_block_error_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+ spin_lock_irq(shost->host_lock);
+ while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+ spin_unlock_irq(shost->host_lock);
+ msleep(1000);
+ spin_lock_irq(shost->host_lock);
+ }
+ spin_unlock_irq(shost->host_lock);
+ return;
+}
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
@@ -845,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
unsigned int loop_count = 0;
int ret = SUCCESS;
+ lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock);
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -947,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
int ret = FAILED;
int cnt, loopcnt;
+ lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock);
/*
* If target is not in a MAPPED state, delay the reset until
@@ -969,12 +997,12 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
if (lpfc_cmd == NULL)
goto out;
- lpfc_cmd->pCmd = cmnd;
lpfc_cmd->timeout = 60;
lpfc_cmd->scsi_hba = phba;
lpfc_cmd->rdata = rdata;
- ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
+ ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun,
+ FCP_LUN_RESET);
if (!ret)
goto out_free_scsi_buf;
@@ -1001,7 +1029,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
cmd_status = iocbqrsp->iocb.ulpStatus;
lpfc_sli_release_iocbq(phba, iocbqrsp);
- lpfc_release_scsi_buf(phba, lpfc_cmd);
/*
* All outstanding txcmplq I/Os should have been aborted by the device.
@@ -1040,6 +1067,8 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
}
out_free_scsi_buf:
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"%d:0713 SCSI layer issued LUN reset (%d, %d) "
"Data: x%x x%x x%x\n",
@@ -1062,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
int cnt, loopcnt;
struct lpfc_scsi_buf * lpfc_cmd;
+ lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock);
lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -1070,7 +1100,6 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
/* The lpfc_cmd storage is reused. Set all loop invariants. */
lpfc_cmd->timeout = 60;
- lpfc_cmd->pCmd = cmnd;
lpfc_cmd->scsi_hba = phba;
/*
@@ -1078,7 +1107,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
* targets known to the driver. Should any target reset
* fail, this routine returns failure to the midlayer.
*/
- for (i = 0; i < MAX_FCP_TARGET; i++) {
+ for (i = 0; i < LPFC_MAX_TARGET; i++) {
/* Search the mapped list for this target ID */
match = 0;
list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
@@ -1090,11 +1119,11 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
if (!match)
continue;
- ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba,
- i, ndlp->rport->dd_data);
+ ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun,
+ ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0713 Bus Reset on target %d failed\n",
+ "%d:0700 Bus Reset on target %d failed\n",
phba->brd_no, i);
err_count++;
}
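
The lpfc_block_error_handler() hunk above makes each lpfc error-handler entry point (abort, LUN reset, bus reset) wait until the remote port leaves FC_PORTSTATE_BLOCKED before doing anything else, dropping the host lock around every one-second sleep so whatever clears the blocked state can still run. Below is a minimal, self-contained user-space sketch of that drop-lock/sleep/recheck idiom; struct port, PORTSTATE_BLOCKED and wait_for_unblocked() are illustrative stand-ins, not lpfc or SCSI midlayer symbols.

/*
 * Illustrative user-space analogue of lpfc_block_error_handler():
 * re-check a state field under a lock, but drop the lock around each
 * sleep so another context can make progress and eventually clear
 * the blocked state.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { PORTSTATE_BLOCKED, PORTSTATE_ONLINE };

struct port {
	pthread_mutex_t lock;
	int state;
};

static void wait_for_unblocked(struct port *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->state == PORTSTATE_BLOCKED) {
		pthread_mutex_unlock(&p->lock);	/* never sleep holding the lock */
		sleep(1);			/* mirrors msleep(1000) */
		pthread_mutex_lock(&p->lock);
	}
	pthread_mutex_unlock(&p->lock);
}

static void *unblock_later(void *arg)
{
	struct port *p = arg;

	sleep(3);				/* pretend the transport recovers */
	pthread_mutex_lock(&p->lock);
	p->state = PORTSTATE_ONLINE;
	pthread_mutex_unlock(&p->lock);
	return NULL;
}

int main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER, PORTSTATE_BLOCKED };
	pthread_t t;

	pthread_create(&t, NULL, unblock_later, &p);
	wait_for_unblocked(&p);			/* returns once the port is online */
	puts("port unblocked, error handler may proceed");
	pthread_join(&t, NULL);
	return 0;
}

Releasing the lock before sleeping is the whole point: the context that eventually flips the state (a helper thread here, the FC transport in the driver) needs that same lock to do so.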
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bb69a7a1ec59..70f4d5a1348e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -191,35 +191,12 @@ static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
{
- uint16_t iotag;
-
list_add_tail(&piocb->list, &pring->txcmplq);
pring->txcmplq_cnt++;
if (unlikely(pring->ringno == LPFC_ELS_RING))
mod_timer(&phba->els_tmofunc,
jiffies + HZ * (phba->fc_ratov << 1));
- if (pring->fast_lookup) {
- /* Setup fast lookup based on iotag for completion */
- iotag = piocb->iocb.ulpIoTag;
- if (iotag && (iotag < pring->fast_iotag))
- *(pring->fast_lookup + iotag) = piocb;
- else {
-
- /* Cmd ring <ringno> put: iotag <iotag> greater then
- configured max <fast_iotag> wd0 <icmd> */
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
- "%d:0316 Cmd ring %d put: iotag x%x "
- "greater then configured max x%x "
- "wd0 x%x\n",
- phba->brd_no,
- pring->ringno, iotag,
- pring->fast_iotag,
- *(((uint32_t *)(&piocb->iocb)) + 7));
- }
- }
return (0);
}
@@ -343,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
kfree(old_arr);
return iotag;
}
- }
+ } else
+ spin_unlock_irq(phba->host->host_lock);
lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -601,7 +579,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
<status> */
lpfc_printf_log(phba,
- KERN_ERR,
+ KERN_WARNING,
LOG_MBOX | LOG_SLI,
"%d:0304 Stray Mailbox Interrupt "
"mbxCommand x%x mbxStatus x%x\n",
@@ -992,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
* resources need to be recovered.
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
- printk(KERN_INFO "%s: IOCB cmd 0x%x processed."
- " Skipping completion\n", __FUNCTION__,
- irsp->ulpCommand);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "%d:0314 IOCB cmd 0x%x"
+ " processed. Skipping"
+ " completion\n", phba->brd_no,
+ irsp->ulpCommand);
break;
}
@@ -1127,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
if (unlikely(irsp->ulpStatus)) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "%d:0326 Rsp Ring %d error: IOCB Data: "
+ "%d:0336 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, pring->ringno,
irsp->un.ulpWord[0], irsp->un.ulpWord[1],
@@ -1145,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* resources need to be recovered.
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
- printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
- "Skipping completion\n", __FUNCTION__,
- irsp->ulpCommand);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "%d:0333 IOCB cmd 0x%x"
+ " processed. Skipping"
+ " completion\n", phba->brd_no,
+ irsp->ulpCommand);
break;
}
@@ -1178,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0321 Unknown IOCB command "
+ "%d:0334 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
phba->brd_no, type, irsp->ulpCommand,
irsp->ulpStatus, irsp->ulpIoTag,
@@ -1261,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
- "%d:0312 Ring %d handler: portRspPut %d "
+ "%d:0303 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
phba->brd_no,
pring->ringno, portRspPut, portRspMax);
@@ -1406,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
- "%d:0321 Unknown IOCB command "
+ "%d:0335 Unknown IOCB command "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
irsp->ulpCommand,
@@ -1422,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
next_iocb,
&saveq->list,
list) {
+ list_del(&rspiocbp->list);
lpfc_sli_release_iocbq(phba,
rspiocbp);
}
}
-
lpfc_sli_release_iocbq(phba, saveq);
}
}
@@ -1570,8 +1552,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
void lpfc_reset_barrier(struct lpfc_hba * phba)
{
- uint32_t * resp_buf;
- uint32_t * mbox_buf;
+ uint32_t __iomem *resp_buf;
+ uint32_t __iomem *mbox_buf;
volatile uint32_t mbox;
uint32_t hc_copy;
int i;
@@ -1587,7 +1569,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
* Tell the other part of the chip to suspend temporarily all
* its DMA activity.
*/
- resp_buf = (uint32_t *)phba->MBslimaddr;
+ resp_buf = phba->MBslimaddr;
/* Disable the error attention */
hc_copy = readl(phba->HCregaddr);
@@ -1605,7 +1587,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
- mbox_buf = (uint32_t *)phba->MBslimaddr;
+ mbox_buf = phba->MBslimaddr;
writel(mbox, mbox_buf);
for (i = 0;
@@ -1734,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
phba->fc_myDID = 0;
phba->fc_prevDID = 0;
- psli->sli_flag = 0;
-
/* Turn off parity checking and serr during the physical reset */
pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
pci_write_config_word(phba->pcidev, PCI_COMMAND,
(cfg_value &
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
/* Now toggle INITFF bit in the Host Control Register */
writel(HC_INITFF, phba->HCregaddr);
mdelay(1);
@@ -1783,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no,
+ "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
phba->hba_state, psli->sli_flag);
word0 = 0;
@@ -1805,7 +1785,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
skip_post = 0;
word0 = 0; /* This is really setting up word1 */
}
- to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
+ to_slim = phba->MBslimaddr + sizeof (uint32_t);
writel(*(uint32_t *) mb, to_slim);
readl(to_slim); /* flush */
@@ -1815,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
spin_unlock_irq(phba->host->host_lock);
+ memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+ psli->stats_start = get_seconds();
+
if (skip_post)
mdelay(100);
else
@@ -1925,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
}
while (resetcount < 2 && !done) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
phba->hba_state = LPFC_STATE_UNKNOWN;
lpfc_sli_brdrestart(phba);
msleep(2500);
@@ -1932,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
if (rc)
break;
+ spin_lock_irq(phba->host->host_lock);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
resetcount++;
/* Call pre CONFIG_PORT mailbox command initialization. A value of 0
@@ -2217,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
return (MBX_NOT_FINISHED);
}
/* timeout active mbox command */
- mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
+ mod_timer(&psli->mbox_tmo, (jiffies +
+ (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
}
/* Mailbox cmd <cmd> issue */
@@ -2277,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
break;
case MBX_POLL:
- i = 0;
psli->mbox_active = NULL;
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First read mbox status word */
@@ -2291,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
/* Read the HBA Host Attention Register */
ha_copy = readl(phba->HAregaddr);
+ i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
+ i *= 1000; /* Convert to ms */
+
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
(!(ha_copy & HA_MBATT) &&
(phba->hba_state > LPFC_WARM_START))) {
- if (i++ >= 100) {
+ if (i-- <= 0) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(phba->host->host_lock,
drvr_flag);
@@ -2313,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
/* Can be in interrupt context, do not sleep */
/* (or might be called with interrupts disabled) */
- mdelay(i);
+ mdelay(1);
spin_lock_irqsave(phba->host->host_lock, drvr_flag);
@@ -2659,8 +2651,6 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
INIT_LIST_HEAD(&(pring->txq));
- kfree(pring->fast_lookup);
- pring->fast_lookup = NULL;
}
spin_unlock_irqrestore(phba->host->host_lock, flags);
@@ -3030,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0329 IOCB wait timeout error - no "
+ "%d:0338 IOCB wait timeout error - no "
"wake response Data x%x\n",
phba->brd_no, timeout);
retval = IOCB_TIMEDOUT;
@@ -3110,6 +3100,24 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
return retval;
}
+int
+lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
+{
+ int i = 0;
+
+ while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
+ if (i++ > LPFC_MBOX_TMO * 1000)
+ return 1;
+
+ if (lpfc_sli_handle_mb_event(phba) == 0)
+ i = 0;
+
+ msleep(1);
+ }
+
+ return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
+}
+
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
{
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a52d6c6cf083..e26de6809358 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -135,8 +135,6 @@ struct lpfc_sli_ring {
uint32_t fast_iotag; /* max fastlookup based iotag */
uint32_t iotag_ctr; /* keeps track of the next iotag to use */
uint32_t iotag_max; /* max iotag value to use */
- struct lpfc_iocbq ** fast_lookup; /* array of IOCB ptrs indexed by
- iotag */
struct list_head txq;
uint16_t txq_cnt; /* current length of queue */
uint16_t txq_max; /* max length */
@@ -174,6 +172,18 @@ struct lpfc_sli_stat {
uint32_t mbox_busy; /* Mailbox cmd busy */
};
+/* Structure to store link status values when port stats are reset */
+struct lpfc_lnk_stat {
+ uint32_t link_failure_count;
+ uint32_t loss_of_sync_count;
+ uint32_t loss_of_signal_count;
+ uint32_t prim_seq_protocol_err_count;
+ uint32_t invalid_tx_word_count;
+ uint32_t invalid_crc_count;
+ uint32_t error_frames;
+ uint32_t link_events;
+};
+
/* Structure used to hold SLI information */
struct lpfc_sli {
uint32_t num_rings;
@@ -203,6 +213,8 @@ struct lpfc_sli {
struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
size_t iocbq_lookup_len; /* current lengs of the array */
uint16_t last_iotag; /* last allocated IOTAG */
+ unsigned long stats_start; /* in seconds */
+ struct lpfc_lnk_stat lnk_stat_offsets;
};
/* Given a pointer to the start of the ring, and the slot number of
@@ -213,3 +225,9 @@ struct lpfc_sli {
#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
command */
+#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
+ * or erase cmds. This is especially
+ * long because of the potential for
+ * multiple flash erases to be
+ * spawned.
+ */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b737568b831..c7091ea29f3f 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.1.6"
+#define LPFC_DRIVER_VERSION "8.1.9"
#define LPFC_DRIVER_NAME "lpfc"
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 93edaa8696cf..89ef34df5a1d 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -378,7 +378,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
int nseg;
total = 0;
- scl = (struct scatterlist *) cmd->buffer;
+ scl = (struct scatterlist *) cmd->request_buffer;
nseg = pci_map_sg(state->pdev, scl, cmd->use_sg,
cmd->sc_data_direction);
for (i = 0; i < nseg; ++i) {
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 4675343228ad..8cd0bd1d0f7c 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -37,6 +37,12 @@
#define LSI_MAX_CHANNELS 16
#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
+#define HBA_SIGNATURE_64_BIT 0x299
+#define PCI_CONF_AMISIG64 0xa4
+
+#define MEGA_SCSI_INQ_EVPD 1
+#define MEGA_INVALID_FIELD_IN_CDB 0x24
+
/**
* scb_t - scsi command control block
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index bdaee144a1c3..b8aa34202ec3 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -132,6 +132,10 @@ typedef struct uioc {
/* Driver Data: */
void __user * user_data;
uint32_t user_data_len;
+
+ /* 64bit alignment */
+ uint32_t pad_for_64bit_align;
+
mraid_passthru_t __user *user_pthru;
mraid_passthru_t *pthru32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 92715130ac09..cd982c877da0 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mbox.c
- * Version : v2.20.4.8 (Apr 11 2006)
+ * Version : v2.20.4.9 (Jul 16 2006)
*
* Authors:
* Atul Mukker <Atul.Mukker@lsil.com>
@@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter)
struct pci_dev *pdev;
mraid_device_t *raid_dev;
int i;
+ uint32_t magic64;
adapter->ito = MBOX_TIMEOUT;
@@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter)
// Set the DMA mask to 64-bit. All supported controllers as capable of
// DMA in this range
- if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) {
-
- con_log(CL_ANN, (KERN_WARNING
- "megaraid: could not set DMA mask for 64-bit.\n"));
+ pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
+
+ if (((magic64 == HBA_SIGNATURE_64_BIT) &&
+ ((adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
+ (adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
+ if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: DMA mask for 64-bit failed\n"));
- goto out_free_sysfs_res;
+ if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: 32-bit DMA mask failed\n"));
+ goto out_free_sysfs_res;
+ }
+ }
}
// setup tasklet for DPC
@@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
rdev->last_disp |= (1L << SCP2CHANNEL(scp));
}
+ if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = ILLEGAL_REQUEST;
+ scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
+ scp->result = CHECK_CONDITION << 1;
+ return NULL;
+ }
+
/* Fall through */
case READ_CAPACITY:
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 868fb0ec93e7..2b5a3285f799 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
#include "megaraid_ioctl.h"
-#define MEGARAID_VERSION "2.20.4.8"
-#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)"
+#define MEGARAID_VERSION "2.20.4.9"
+#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)"
/*
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index e8f534fb336b..d85b9a8f1b8d 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mm.c
- * Version : v2.20.2.6 (Mar 7 2005)
+ * Version : v2.20.2.7 (Jul 16 2006)
*
* Common management module
*/
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index 3d9e67d6849d..c8762b2b8ed1 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -27,9 +27,9 @@
#include "megaraid_ioctl.h"
-#define LSI_COMMON_MOD_VERSION "2.20.2.6"
+#define LSI_COMMON_MOD_VERSION "2.20.2.7"
#define LSI_COMMON_MOD_EXT_VERSION \
- "(Release Date: Mon Mar 7 00:01:03 EST 2005)"
+ "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
#define LSI_DBGLVL dbglevel
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index c88717727be8..5572981a9f92 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1268,7 +1268,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
if (cmd->use_sg > 0) {
int nseg;
total = 0;
- scl = (struct scatterlist *) cmd->buffer;
+ scl = (struct scatterlist *) cmd->request_buffer;
off = ms->data_ptr;
nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg,
cmd->sc_data_direction);
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index d1f38c32aa15..efc8fff1d250 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -183,7 +183,8 @@ static struct ata_port_info adma_port_info[] = {
{
.sht = &adma_ata_sht,
.host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
- ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO,
+ ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
+ ATA_FLAG_PIO_POLLING,
.pio_mask = 0x10, /* pio4 */
.udma_mask = 0x1f, /* udma0-4 */
.port_ops = &adma_ata_ops,
diff --git a/drivers/scsi/pluto.c b/drivers/scsi/pluto.c
index 7abf64d1bfc9..0bd9c60e6455 100644
--- a/drivers/scsi/pluto.c
+++ b/drivers/scsi/pluto.c
@@ -169,8 +169,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
SCpnt->request->rq_status = RQ_SCSI_BUSY;
SCpnt->done = pluto_detect_done;
- SCpnt->bufflen = 256;
- SCpnt->buffer = fcs[i].inquiry;
SCpnt->request_bufflen = 256;
SCpnt->request_buffer = fcs[i].inquiry;
PLD(("set up %d %08lx\n", i, (long)SCpnt))
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 139ea0e27fd7..0930260aec2c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -487,6 +487,7 @@ typedef struct {
#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */
#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */
/* used. */
+#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */
#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */
#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */
#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9758dba95542..859649160caa 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
int
qla2x00_abort_isp(scsi_qla_host_t *ha)
{
+ int rval;
unsigned long flags = 0;
uint16_t cnt;
srb_t *sp;
@@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+
+ if (ha->eft) {
+ rval = qla2x00_trace_control(ha, TC_ENABLE,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
} else { /* failed the ISP abort */
ha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2b60a27eff0b..c5b3c610a32a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
mrk24->lun[2] = MSB(lun);
+ host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 795bf15b1b8f..de0613135f70 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
"%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
break;
+
+ case MBA_TRACE_NOTIFICATION:
+ DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
+ ha->host_no, mb[1], mb[2]));
+ break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ec7ebb6037e6..65cbe2f5eea2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- srb_t *sp;
int ret;
unsigned int id, lun;
unsigned long serial;
@@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
serial = cmd->serial_number;
- sp = (srb_t *) CMD_SP(cmd);
- if (!sp || !fcport)
+ if (!fcport)
return ret;
qla_printk(KERN_INFO, ha,
@@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- srb_t *sp;
int ret;
unsigned int id, lun;
unsigned long serial;
@@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
serial = cmd->serial_number;
- sp = (srb_t *) CMD_SP(cmd);
- if (!sp || !fcport)
+ if (!fcport)
return ret;
qla_printk(KERN_INFO, ha,
@@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- srb_t *sp;
int ret;
unsigned int id, lun;
unsigned long serial;
@@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
serial = cmd->serial_number;
- sp = (srb_t *) CMD_SP(cmd);
- if (!sp || !fcport)
+ if (!fcport)
return ret;
qla_printk(KERN_INFO, ha,
@@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data)
next_loopid = 0;
list_for_each_entry(fcport, &ha->fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
/*
* If the port is not ONLINE then try to login
* to it if we haven't run out of retries.
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d2d683440659..971259032ef7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.01.05-k3"
+#define QLA2XXX_VERSION "8.01.07-k1"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 1
-#define QLA_DRIVER_PATCH_VER 5
+#define QLA_DRIVER_PATCH_VER 7
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 69e0551a81d2..5b2f0741a55b 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -874,7 +874,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
if (Cmnd->use_sg) {
int sg_count;
- sg = (struct scatterlist *) Cmnd->buffer;
+ sg = (struct scatterlist *) Cmnd->request_buffer;
sg_count = sbus_map_sg(qpti->sdev, sg, Cmnd->use_sg, Cmnd->sc_data_direction);
ds = cmd->dataseg;
@@ -1278,7 +1278,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti)
if (Cmnd->use_sg) {
sbus_unmap_sg(qpti->sdev,
- (struct scatterlist *)Cmnd->buffer,
+ (struct scatterlist *)Cmnd->request_buffer,
Cmnd->use_sg,
Cmnd->sc_data_direction);
} else {
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 64631bd38952..4776f4e55839 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -269,8 +269,15 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20619 },
+/* TODO: remove all associated board_20771 code, as it completely
+ * duplicates board_2037x code, unless reason for separation can be
+ * divined.
+ */
+#if 0
{ PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_20771 },
+#endif
+
{ } /* terminate list */
};
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 7aabb45c35e5..d0a85073ebf7 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -109,6 +109,7 @@ enum {
};
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sil_pci_device_resume(struct pci_dev *pdev);
static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
@@ -160,6 +161,8 @@ static struct pci_driver sil_pci_driver = {
.id_table = sil_pci_tbl,
.probe = sil_init_one,
.remove = ata_pci_remove_one,
+ .suspend = ata_pci_device_suspend,
+ .resume = sil_pci_device_resume,
};
static struct scsi_host_template sil_sht = {
@@ -178,6 +181,8 @@ static struct scsi_host_template sil_sht = {
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+ .suspend = ata_scsi_device_suspend,
+ .resume = ata_scsi_device_resume,
};
static const struct ata_port_operations sil_ops = {
@@ -370,7 +375,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
* during hardreset makes controllers with broken SIEN
* repeat probing needlessly.
*/
- if (!(ap->flags & ATA_FLAG_FROZEN)) {
+ if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
ata_ehi_hotplugged(&ap->eh_info);
ap->eh_info.serror |= serror;
}
@@ -561,6 +566,52 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
}
}
+static void sil_init_controller(struct pci_dev *pdev,
+ int n_ports, unsigned long host_flags,
+ void __iomem *mmio_base)
+{
+ u8 cls;
+ u32 tmp;
+ int i;
+
+ /* Initialize FIFO PCI bus arbitration */
+ cls = sil_get_device_cache_line(pdev);
+ if (cls) {
+ cls >>= 3;
+ cls++; /* cls = (line_size/8)+1 */
+ for (i = 0; i < n_ports; i++)
+ writew(cls << 8 | cls,
+ mmio_base + sil_port[i].fifo_cfg);
+ } else
+ dev_printk(KERN_WARNING, &pdev->dev,
+ "cache line size not set. Driver may not function\n");
+
+ /* Apply R_ERR on DMA activate FIS errata workaround */
+ if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
+ int cnt;
+
+ for (i = 0, cnt = 0; i < n_ports; i++) {
+ tmp = readl(mmio_base + sil_port[i].sfis_cfg);
+ if ((tmp & 0x3) != 0x01)
+ continue;
+ if (!cnt)
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Applying R_ERR on DMA activate "
+ "FIS errata fix\n");
+ writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
+ cnt++;
+ }
+ }
+
+ if (n_ports == 4) {
+ /* flip the magic "make 4 ports work" bit */
+ tmp = readl(mmio_base + sil_port[2].bmdma);
+ if ((tmp & SIL_INTR_STEERING) == 0)
+ writel(tmp | SIL_INTR_STEERING,
+ mmio_base + sil_port[2].bmdma);
+ }
+}
+
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
@@ -570,8 +621,6 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
int rc;
unsigned int i;
int pci_dev_busy = 0;
- u32 tmp;
- u8 cls;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -630,42 +679,8 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ata_std_ports(&probe_ent->port[i]);
}
- /* Initialize FIFO PCI bus arbitration */
- cls = sil_get_device_cache_line(pdev);
- if (cls) {
- cls >>= 3;
- cls++; /* cls = (line_size/8)+1 */
- for (i = 0; i < probe_ent->n_ports; i++)
- writew(cls << 8 | cls,
- mmio_base + sil_port[i].fifo_cfg);
- } else
- dev_printk(KERN_WARNING, &pdev->dev,
- "cache line size not set. Driver may not function\n");
-
- /* Apply R_ERR on DMA activate FIS errata workaround */
- if (probe_ent->host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
- int cnt;
-
- for (i = 0, cnt = 0; i < probe_ent->n_ports; i++) {
- tmp = readl(mmio_base + sil_port[i].sfis_cfg);
- if ((tmp & 0x3) != 0x01)
- continue;
- if (!cnt)
- dev_printk(KERN_INFO, &pdev->dev,
- "Applying R_ERR on DMA activate "
- "FIS errata fix\n");
- writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
- cnt++;
- }
- }
-
- if (ent->driver_data == sil_3114) {
- /* flip the magic "make 4 ports work" bit */
- tmp = readl(mmio_base + sil_port[2].bmdma);
- if ((tmp & SIL_INTR_STEERING) == 0)
- writel(tmp | SIL_INTR_STEERING,
- mmio_base + sil_port[2].bmdma);
- }
+ sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
+ mmio_base);
pci_set_master(pdev);
@@ -685,6 +700,18 @@ err_out:
return rc;
}
+static int sil_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+
+ ata_pci_device_do_resume(pdev);
+ sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
+ host_set->mmio_base);
+ ata_host_set_resume(host_set);
+
+ return 0;
+}
+
static int __init sil_init(void)
{
return pci_module_init(&sil_pci_driver);
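
The sata_sil changes above follow a common pattern for adding PCI suspend/resume support: register programming that probe used to do inline (FIFO arbitration setup, the R_ERR-on-DMA-activate errata workaround, the four-port interrupt steering bit) is pulled into sil_init_controller(), and the new sil_pci_device_resume() replays it after ata_pci_device_do_resume() because that state is lost across suspend. A small sketch of the idea follows; the struct and function names are illustrative only and not libata API.

/*
 * Sketch of the refactor above: hardware programming is done by one
 * init_controller() helper so a resume path can replay it after the
 * registers were lost across suspend.
 */
#include <stdio.h>

struct controller {
	int n_ports;
	unsigned long flags;
	int programmed;			/* stands in for MMIO register state */
};

/* shared by first-time probe and by resume-from-suspend */
static void init_controller(struct controller *c)
{
	c->programmed = 1;		/* e.g. FIFO setup, errata fixes, port bits */
	printf("programmed %d port(s), flags %#lx\n", c->n_ports, c->flags);
}

static int probe(struct controller *c)
{
	init_controller(c);
	return 0;
}

static int resume(struct controller *c)
{
	/* register contents were lost across suspend; redo the same setup */
	c->programmed = 0;
	init_controller(c);
	return 0;
}

int main(void)
{
	struct controller c = { .n_ports = 4, .flags = 0x1 };

	probe(&c);
	resume(&c);
	return 0;
}

Keeping a single helper means the probe and resume paths cannot drift apart when more initialization steps are added later.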
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 07a1c6a8a414..3f368c7d3ef9 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -92,6 +92,7 @@ enum {
HOST_CTRL_STOP = (1 << 18), /* latched PCI STOP */
HOST_CTRL_DEVSEL = (1 << 19), /* latched PCI DEVSEL */
HOST_CTRL_REQ64 = (1 << 20), /* latched PCI REQ64 */
+ HOST_CTRL_GLOBAL_RST = (1 << 31), /* global reset */
/*
* Port registers
@@ -338,6 +339,7 @@ static int sil24_port_start(struct ata_port *ap);
static void sil24_port_stop(struct ata_port *ap);
static void sil24_host_stop(struct ata_host_set *host_set);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sil24_pci_device_resume(struct pci_dev *pdev);
static const struct pci_device_id sil24_pci_tbl[] = {
{ 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
@@ -353,6 +355,8 @@ static struct pci_driver sil24_pci_driver = {
.id_table = sil24_pci_tbl,
.probe = sil24_init_one,
.remove = ata_pci_remove_one, /* safe? */
+ .suspend = ata_pci_device_suspend,
+ .resume = sil24_pci_device_resume,
};
static struct scsi_host_template sil24_sht = {
@@ -372,6 +376,8 @@ static struct scsi_host_template sil24_sht = {
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+ .suspend = ata_scsi_device_suspend,
+ .resume = ata_scsi_device_resume,
};
static const struct ata_port_operations sil24_ops = {
@@ -607,7 +613,7 @@ static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
/* SStatus oscillates between zero and valid status after
* DEV_RST, debounce it.
*/
- rc = sata_phy_debounce(ap, sata_deb_timing_before_fsrst);
+ rc = sata_phy_debounce(ap, sata_deb_timing_long);
if (rc) {
reason = "PHY debouncing failed";
goto err;
@@ -988,6 +994,64 @@ static void sil24_host_stop(struct ata_host_set *host_set)
kfree(hpriv);
}
+static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
+ unsigned long host_flags,
+ void __iomem *host_base,
+ void __iomem *port_base)
+{
+ u32 tmp;
+ int i;
+
+ /* GPIO off */
+ writel(0, host_base + HOST_FLASH_CMD);
+
+ /* clear global reset & mask interrupts during initialization */
+ writel(0, host_base + HOST_CTRL);
+
+ /* init ports */
+ for (i = 0; i < n_ports; i++) {
+ void __iomem *port = port_base + i * PORT_REGS_SIZE;
+
+ /* Initial PHY setting */
+ writel(0x20c, port + PORT_PHY_CFG);
+
+ /* Clear port RST */
+ tmp = readl(port + PORT_CTRL_STAT);
+ if (tmp & PORT_CS_PORT_RST) {
+ writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+ tmp = ata_wait_register(port + PORT_CTRL_STAT,
+ PORT_CS_PORT_RST,
+ PORT_CS_PORT_RST, 10, 100);
+ if (tmp & PORT_CS_PORT_RST)
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to clear port RST\n");
+ }
+
+ /* Configure IRQ WoC */
+ if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
+ writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
+ else
+ writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
+
+ /* Zero error counters. */
+ writel(0x8000, port + PORT_DECODE_ERR_THRESH);
+ writel(0x8000, port + PORT_CRC_ERR_THRESH);
+ writel(0x8000, port + PORT_HSHK_ERR_THRESH);
+ writel(0x0000, port + PORT_DECODE_ERR_CNT);
+ writel(0x0000, port + PORT_CRC_ERR_CNT);
+ writel(0x0000, port + PORT_HSHK_ERR_CNT);
+
+ /* Always use 64bit activation */
+ writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
+
+ /* Clear port multiplier enable and resume bits */
+ writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
+ }
+
+ /* Turn on interrupts */
+ writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+}
+
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version = 0;
@@ -1042,7 +1106,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
- probe_ent->mmio_base = port_base;
probe_ent->private_data = hpriv;
hpriv->host_base = host_base;
@@ -1076,9 +1139,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- /* GPIO off */
- writel(0, host_base + HOST_FLASH_CMD);
-
/* Apply workaround for completion IRQ loss on PCI-X errata */
if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
tmp = readl(host_base + HOST_CTRL);
@@ -1090,56 +1150,18 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
}
- /* clear global reset & mask interrupts during initialization */
- writel(0, host_base + HOST_CTRL);
-
for (i = 0; i < probe_ent->n_ports; i++) {
- void __iomem *port = port_base + i * PORT_REGS_SIZE;
- unsigned long portu = (unsigned long)port;
+ unsigned long portu =
+ (unsigned long)port_base + i * PORT_REGS_SIZE;
probe_ent->port[i].cmd_addr = portu;
probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
ata_std_ports(&probe_ent->port[i]);
-
- /* Initial PHY setting */
- writel(0x20c, port + PORT_PHY_CFG);
-
- /* Clear port RST */
- tmp = readl(port + PORT_CTRL_STAT);
- if (tmp & PORT_CS_PORT_RST) {
- writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
- tmp = ata_wait_register(port + PORT_CTRL_STAT,
- PORT_CS_PORT_RST,
- PORT_CS_PORT_RST, 10, 100);
- if (tmp & PORT_CS_PORT_RST)
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to clear port RST\n");
- }
-
- /* Configure IRQ WoC */
- if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
- writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
- else
- writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
-
- /* Zero error counters. */
- writel(0x8000, port + PORT_DECODE_ERR_THRESH);
- writel(0x8000, port + PORT_CRC_ERR_THRESH);
- writel(0x8000, port + PORT_HSHK_ERR_THRESH);
- writel(0x0000, port + PORT_DECODE_ERR_CNT);
- writel(0x0000, port + PORT_CRC_ERR_CNT);
- writel(0x0000, port + PORT_HSHK_ERR_CNT);
-
- /* Always use 64bit activation */
- writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);
-
- /* Clear port multiplier enable and resume bits */
- writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
}
- /* Turn on interrupts */
- writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+ sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
+ host_base, port_base);
pci_set_master(pdev);
@@ -1162,6 +1184,25 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
+static int sil24_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
+ struct sil24_host_priv *hpriv = host_set->private_data;
+
+ ata_pci_device_do_resume(pdev);
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
+ writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
+
+ sil24_init_controller(pdev, host_set->n_ports,
+ host_set->ports[0]->flags,
+ hpriv->host_base, hpriv->port_base);
+
+ ata_host_set_resume(host_set);
+
+ return 0;
+}
+
static int __init sil24_init(void)
{
return pci_module_init(&sil24_pci_driver);
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 03baec2191bf..01d40369a8a5 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -74,6 +74,7 @@ enum {
static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static void vt6420_error_handler(struct ata_port *ap);
static const struct pci_device_id svia_pci_tbl[] = {
{ 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
@@ -107,7 +108,38 @@ static struct scsi_host_template svia_sht = {
.bios_param = ata_std_bios_param,
};
-static const struct ata_port_operations svia_sata_ops = {
+static const struct ata_port_operations vt6420_sata_ops = {
+ .port_disable = ata_port_disable,
+
+ .tf_load = ata_tf_load,
+ .tf_read = ata_tf_read,
+ .check_status = ata_check_status,
+ .exec_command = ata_exec_command,
+ .dev_select = ata_std_dev_select,
+
+ .bmdma_setup = ata_bmdma_setup,
+ .bmdma_start = ata_bmdma_start,
+ .bmdma_stop = ata_bmdma_stop,
+ .bmdma_status = ata_bmdma_status,
+
+ .qc_prep = ata_qc_prep,
+ .qc_issue = ata_qc_issue_prot,
+ .data_xfer = ata_pio_data_xfer,
+
+ .freeze = ata_bmdma_freeze,
+ .thaw = ata_bmdma_thaw,
+ .error_handler = vt6420_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
+
+ .irq_handler = ata_interrupt,
+ .irq_clear = ata_bmdma_irq_clear,
+
+ .port_start = ata_port_start,
+ .port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
+};
+
+static const struct ata_port_operations vt6421_sata_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
@@ -141,13 +173,13 @@ static const struct ata_port_operations svia_sata_ops = {
.host_stop = ata_host_stop,
};
-static struct ata_port_info svia_port_info = {
+static struct ata_port_info vt6420_port_info = {
.sht = &svia_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
- .port_ops = &svia_sata_ops,
+ .port_ops = &vt6420_sata_ops,
};
MODULE_AUTHOR("Jeff Garzik");
@@ -170,6 +202,81 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
}
+/**
+ * vt6420_prereset - prereset for vt6420
+ * @ap: target ATA port
+ *
+ * SCR registers on vt6420 are pieces of shit and may hang the
+ * whole machine completely if accessed with the wrong timing.
+ * To avoid such a catastrophe, vt6420 doesn't provide generic SCR
+ * access operations, but uses SStatus and SControl only during
+ * boot probing in a controlled way.
+ *
+ * As the old (pre EH update) probing code is proven to work, we
+ * strictly follow the access pattern.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ *
+ * RETURNS:
+ * 0 on success, -errno otherwise.
+ */
+static int vt6420_prereset(struct ata_port *ap)
+{
+ struct ata_eh_context *ehc = &ap->eh_context;
+ unsigned long timeout = jiffies + (HZ * 5);
+ u32 sstatus, scontrol;
+ int online;
+
+ /* don't do any SCR stuff if we're not loading */
+ if (!(ap->pflags & ATA_PFLAG_LOADING))
+ goto skip_scr;
+
+ /* Resume phy. This is the old resume sequence from
+ * __sata_phy_reset().
+ */
+ svia_scr_write(ap, SCR_CONTROL, 0x300);
+ svia_scr_read(ap, SCR_CONTROL); /* flush */
+
+ /* wait for phy to become ready, if necessary */
+ do {
+ msleep(200);
+ if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
+ break;
+ } while (time_before(jiffies, timeout));
+
+ /* open code sata_print_link_status() */
+ sstatus = svia_scr_read(ap, SCR_STATUS);
+ scontrol = svia_scr_read(ap, SCR_CONTROL);
+
+ online = (sstatus & 0xf) == 0x3;
+
+ ata_port_printk(ap, KERN_INFO,
+ "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
+ online ? "up" : "down", sstatus, scontrol);
+
+ /* SStatus is read one more time */
+ svia_scr_read(ap, SCR_STATUS);
+
+ if (!online) {
+ /* tell EH to bail */
+ ehc->i.action &= ~ATA_EH_RESET_MASK;
+ return 0;
+ }
+
+ skip_scr:
+ /* wait for !BSY */
+ ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+ return 0;
+}
+
+static void vt6420_error_handler(struct ata_port *ap)
+{
+ return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
+ NULL, ata_std_postreset);
+}
+
static const unsigned int svia_bar_sizes[] = {
8, 4, 8, 4, 16, 256
};
@@ -210,7 +317,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
{
struct ata_probe_ent *probe_ent;
- struct ata_port_info *ppi = &svia_port_info;
+ struct ata_port_info *ppi = &vt6420_port_info;
probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
if (!probe_ent)
@@ -239,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
probe_ent->sht = &svia_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
- probe_ent->port_ops = &svia_sata_ops;
+ probe_ent->port_ops = &vt6421_sata_ops;
probe_ent->n_ports = N_PORTS;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 916fe6fba756..ad37871594f5 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -297,7 +297,7 @@ static const struct ata_port_operations vsc_sata_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
- .data_xfer = ata_pio_data_xfer,
+ .data_xfer = ata_mmio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2ab7df0dcfe8..b332caddd5b3 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -346,7 +346,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
if (level > 3) {
printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
" done = 0x%p, queuecommand 0x%p\n",
- cmd->buffer, cmd->bufflen,
+ cmd->request_buffer, cmd->request_bufflen,
cmd->done,
sdev->host->hostt->queuecommand);
@@ -661,11 +661,6 @@ void __scsi_done(struct scsi_cmnd *cmd)
*/
int scsi_retry_command(struct scsi_cmnd *cmd)
{
- /*
- * Restore the SCSI command state.
- */
- scsi_setup_cmd_retry(cmd);
-
/*
* Zero the sense information from the last time we tried
* this command.
@@ -711,10 +706,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
"Notifying upper driver of completion "
"(result %x)\n", cmd->result));
- /*
- * We can get here with use_sg=0, causing a panic in the upper level
- */
- cmd->use_sg = cmd->old_use_sg;
cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9c63b00773c4..a80303c6b3fd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -286,7 +286,7 @@ static int inquiry_evpd_83(unsigned char * arr, int target_dev_id,
int dev_id_num, const char * dev_id_str,
int dev_id_str_len);
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id);
-static void do_create_driverfs_files(void);
+static int do_create_driverfs_files(void);
static void do_remove_driverfs_files(void);
static int sdebug_add_adapter(void);
@@ -2487,19 +2487,22 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
sdebug_add_host_store);
-static void do_create_driverfs_files(void)
+static int do_create_driverfs_files(void)
{
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
- driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
+ int ret;
+
+ ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
+ ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
+ return ret;
}
static void do_remove_driverfs_files(void)
@@ -2522,6 +2525,7 @@ static int __init scsi_debug_init(void)
unsigned int sz;
int host_to_add;
int k;
+ int ret;
if (scsi_debug_dev_size_mb < 1)
scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
@@ -2560,12 +2564,32 @@ static int __init scsi_debug_init(void)
if (scsi_debug_num_parts > 0)
sdebug_build_parts(fake_storep);
- init_all_queued();
+ ret = device_register(&pseudo_primary);
+ if (ret < 0) {
+ printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
+ ret);
+ goto free_vm;
+ }
+ ret = bus_register(&pseudo_lld_bus);
+ if (ret < 0) {
+ printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
+ ret);
+ goto dev_unreg;
+ }
+ ret = driver_register(&sdebug_driverfs_driver);
+ if (ret < 0) {
+ printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
+ ret);
+ goto bus_unreg;
+ }
+ ret = do_create_driverfs_files();
+ if (ret < 0) {
+ printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
+ ret);
+ goto del_files;
+ }
- device_register(&pseudo_primary);
- bus_register(&pseudo_lld_bus);
- driver_register(&sdebug_driverfs_driver);
- do_create_driverfs_files();
+ init_all_queued();
sdebug_driver_template.proc_name = (char *)sdebug_proc_name;
@@ -2585,6 +2609,18 @@ static int __init scsi_debug_init(void)
scsi_debug_add_host);
}
return 0;
+
+del_files:
+ do_remove_driverfs_files();
+ driver_unregister(&sdebug_driverfs_driver);
+bus_unreg:
+ bus_unregister(&pseudo_lld_bus);
+dev_unreg:
+ device_unregister(&pseudo_primary);
+free_vm:
+ vfree(fake_storep);
+
+ return ret;
}
static void __exit scsi_debug_exit(void)
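
The scsi_debug_init() hunks above replace fire-and-forget registration calls with checked ones that unwind in reverse order when a later step fails (del_files, bus_unreg, dev_unreg, free_vm). A self-contained sketch of that staged-init, goto-unwind idiom follows; register_a/b/c are stubs standing in for the device, bus and driver registrations, not the actual driver-core calls.

/*
 * Staged initialization with goto-based unwinding: each step is
 * checked, and a failure jumps to a label that undoes only the
 * steps that already succeeded, in reverse order.
 */
#include <stdio.h>

static int register_a(void)    { puts("A registered");   return 0; }
static void unregister_a(void) { puts("A unregistered"); }
static int register_b(void)    { puts("B registered");   return 0; }
static void unregister_b(void) { puts("B unregistered"); }
static int register_c(void)    { puts("C failed");       return -1; }

static int init(void)
{
	int ret;

	ret = register_a();
	if (ret < 0)
		goto out;
	ret = register_b();
	if (ret < 0)
		goto unreg_a;
	ret = register_c();
	if (ret < 0)
		goto unreg_b;
	return 0;

unreg_b:
	unregister_b();
unreg_a:
	unregister_a();
out:
	return ret;
}

int main(void)
{
	printf("init() returned %d\n", init());
	return 0;
}

Each label undoes exactly the steps that had completed before the failure, so the function leaves the system as it found it.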
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6683d596234a..a8ed5a22009d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,19 +460,71 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
* Return value:
* SUCCESS or FAILED or NEEDS_RETRY
**/
-static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
+static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
+ int cmnd_size, int timeout, int copy_sense)
{
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
+ int old_result = scmd->result;
DECLARE_COMPLETION(done);
unsigned long timeleft;
unsigned long flags;
+ unsigned char old_cmnd[MAX_COMMAND_SIZE];
+ enum dma_data_direction old_data_direction;
+ unsigned short old_use_sg;
+ unsigned char old_cmd_len;
+ unsigned old_bufflen;
+ void *old_buffer;
int rtn;
+ /*
+ * We need saved copies of a number of fields - this is because
+ * error handling may need to overwrite these with different values
+ * to run different commands, and once error handling is complete,
+ * we will need to restore these values prior to running the actual
+ * command.
+ */
+ old_buffer = scmd->request_buffer;
+ old_bufflen = scmd->request_bufflen;
+ memcpy(old_cmnd, scmd->cmnd, sizeof(scmd->cmnd));
+ old_data_direction = scmd->sc_data_direction;
+ old_cmd_len = scmd->cmd_len;
+ old_use_sg = scmd->use_sg;
+
+ memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
+ memcpy(scmd->cmnd, cmnd, cmnd_size);
+
+ if (copy_sense) {
+ int gfp_mask = GFP_ATOMIC;
+
+ if (shost->hostt->unchecked_isa_dma)
+ gfp_mask |= __GFP_DMA;
+
+ scmd->sc_data_direction = DMA_FROM_DEVICE;
+ scmd->request_bufflen = 252;
+ scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
+ if (!scmd->request_buffer)
+ return FAILED;
+ } else {
+ scmd->request_buffer = NULL;
+ scmd->request_bufflen = 0;
+ scmd->sc_data_direction = DMA_NONE;
+ }
+
+ scmd->underflow = 0;
+ scmd->use_sg = 0;
+ scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+
if (sdev->scsi_level <= SCSI_2)
scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
(sdev->lun << 5 & 0xe0);
+ /*
+ * Zero the sense buffer. The scsi spec mandates that any
+ * untransferred sense data should be interpreted as being zero.
+ */
+ memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
+
shost->eh_action = &done;
spin_lock_irqsave(shost->host_lock, flags);
@@ -522,6 +574,29 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
rtn = FAILED;
}
+
+ /*
+ * Last chance to have valid sense data.
+ */
+ if (copy_sense) {
+ if (!SCSI_SENSE_VALID(scmd)) {
+ memcpy(scmd->sense_buffer, scmd->request_buffer,
+ sizeof(scmd->sense_buffer));
+ }
+ kfree(scmd->request_buffer);
+ }
+
+
+ /*
+ * Restore original data
+ */
+ scmd->request_buffer = old_buffer;
+ scmd->request_bufflen = old_bufflen;
+ memcpy(scmd->cmnd, old_cmnd, sizeof(scmd->cmnd));
+ scmd->sc_data_direction = old_data_direction;
+ scmd->cmd_len = old_cmd_len;
+ scmd->use_sg = old_use_sg;
+ scmd->result = old_result;
return rtn;
}
@@ -537,56 +612,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
static int scsi_request_sense(struct scsi_cmnd *scmd)
{
static unsigned char generic_sense[6] =
- {REQUEST_SENSE, 0, 0, 0, 252, 0};
- unsigned char *scsi_result;
- int saved_result;
- int rtn;
-
- memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
-
- scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));
-
-
- if (unlikely(!scsi_result)) {
- printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
- __FUNCTION__);
- return FAILED;
- }
-
- /*
- * zero the sense buffer. some host adapters automatically always
- * request sense, so it is not a good idea that
- * scmd->request_buffer and scmd->sense_buffer point to the same
- * address (db). 0 is not a valid sense code.
- */
- memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
- memset(scsi_result, 0, 252);
+ {REQUEST_SENSE, 0, 0, 0, 252, 0};
- saved_result = scmd->result;
- scmd->request_buffer = scsi_result;
- scmd->request_bufflen = 252;
- scmd->use_sg = 0;
- scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
- scmd->sc_data_direction = DMA_FROM_DEVICE;
- scmd->underflow = 0;
-
- rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
-
- /* last chance to have valid sense data */
- if(!SCSI_SENSE_VALID(scmd)) {
- memcpy(scmd->sense_buffer, scmd->request_buffer,
- sizeof(scmd->sense_buffer));
- }
-
- kfree(scsi_result);
-
- /*
- * when we eventually call scsi_finish, we really wish to complete
- * the original request, so let's restore the original data. (db)
- */
- scsi_setup_cmd_retry(scmd);
- scmd->result = saved_result;
- return rtn;
+ return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1);
}
/**
@@ -605,12 +633,6 @@ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
{
scmd->device->host->host_failed--;
scmd->eh_eflags = 0;
-
- /*
- * set this back so that the upper level can correctly free up
- * things.
- */
- scsi_setup_cmd_retry(scmd);
list_move_tail(&scmd->eh_entry, done_q);
}
EXPORT_SYMBOL(scsi_eh_finish_cmd);
@@ -715,47 +737,23 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
int retry_cnt = 1, rtn;
- int saved_result;
retry_tur:
- memcpy(scmd->cmnd, tur_command, sizeof(tur_command));
-
- /*
- * zero the sense buffer. the scsi spec mandates that any
- * untransferred sense data should be interpreted as being zero.
- */
- memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
-
- saved_result = scmd->result;
- scmd->request_buffer = NULL;
- scmd->request_bufflen = 0;
- scmd->use_sg = 0;
- scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
- scmd->underflow = 0;
- scmd->sc_data_direction = DMA_NONE;
+ rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
- rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
-
- /*
- * when we eventually call scsi_finish, we really wish to complete
- * the original request, so let's restore the original data. (db)
- */
- scsi_setup_cmd_retry(scmd);
- scmd->result = saved_result;
-
- /*
- * hey, we are done. let's look to see what happened.
- */
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
__FUNCTION__, scmd, rtn));
- if (rtn == SUCCESS)
- return 0;
- else if (rtn == NEEDS_RETRY) {
+
+ switch (rtn) {
+ case NEEDS_RETRY:
if (retry_cnt--)
goto retry_tur;
+ /*FALLTHRU*/
+ case SUCCESS:
return 0;
+ default:
+ return 1;
}
- return 1;
}
/**
@@ -837,44 +835,16 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
- int rtn;
- int saved_result;
-
- if (!scmd->device->allow_restart)
- return 1;
-
- memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
-
- /*
- * zero the sense buffer. the scsi spec mandates that any
- * untransferred sense data should be interpreted as being zero.
- */
- memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
- saved_result = scmd->result;
- scmd->request_buffer = NULL;
- scmd->request_bufflen = 0;
- scmd->use_sg = 0;
- scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
- scmd->underflow = 0;
- scmd->sc_data_direction = DMA_NONE;
+ if (scmd->device->allow_restart) {
+ int rtn;
- rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT);
-
- /*
- * when we eventually call scsi_finish, we really wish to complete
- * the original request, so let's restore the original data. (db)
- */
- scsi_setup_cmd_retry(scmd);
- scmd->result = saved_result;
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
+ START_UNIT_TIMEOUT, 0);
+ if (rtn == SUCCESS)
+ return 0;
+ }
- /*
- * hey, we are done. let's look to see what happened.
- */
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
- __FUNCTION__, scmd, rtn));
- if (rtn == SUCCESS)
- return 0;
return 1;
}
@@ -1684,8 +1654,6 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
scmd->scsi_done = scsi_reset_provider_done_command;
scmd->done = NULL;
- scmd->buffer = NULL;
- scmd->bufflen = 0;
scmd->request_buffer = NULL;
scmd->request_bufflen = 0;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
sshdr.asc, sshdr.ascq);
break;
case NOT_READY: /* This happens if there is no disc in drive */
- if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) {
- printk(KERN_INFO "Device not ready. Make sure"
- " there is a disc in the drive.\n");
+ if (sdev->removable)
break;
- }
case UNIT_ATTENTION:
if (sdev->removable) {
sdev->changed = 1;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 08af9aae7df3..077c1c691210 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -436,60 +436,16 @@ EXPORT_SYMBOL_GPL(scsi_execute_async);
*
* Arguments: cmd - command that is ready to be queued.
*
- * Returns: Nothing
- *
* Notes: This function has the job of initializing a number of
* fields related to error handling. Typically this will
* be called once for each command, as required.
*/
-static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
+static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
cmd->serial_number = 0;
-
memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
-
if (cmd->cmd_len == 0)
cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
-
- /*
- * We need saved copies of a number of fields - this is because
- * error handling may need to overwrite these with different values
- * to run different commands, and once error handling is complete,
- * we will need to restore these values prior to running the actual
- * command.
- */
- cmd->old_use_sg = cmd->use_sg;
- cmd->old_cmd_len = cmd->cmd_len;
- cmd->sc_old_data_direction = cmd->sc_data_direction;
- cmd->old_underflow = cmd->underflow;
- memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
- cmd->buffer = cmd->request_buffer;
- cmd->bufflen = cmd->request_bufflen;
-
- return 1;
-}
-
-/*
- * Function: scsi_setup_cmd_retry()
- *
- * Purpose: Restore the command state for a retry
- *
- * Arguments: cmd - command to be restored
- *
- * Returns: Nothing
- *
- * Notes: Immediately prior to retrying a command, we need
- * to restore certain fields that we saved above.
- */
-void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
-{
- memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
- cmd->request_buffer = cmd->buffer;
- cmd->request_bufflen = cmd->bufflen;
- cmd->use_sg = cmd->old_use_sg;
- cmd->cmd_len = cmd->old_cmd_len;
- cmd->sc_data_direction = cmd->sc_old_data_direction;
- cmd->underflow = cmd->old_underflow;
}
void scsi_device_unbusy(struct scsi_device *sdev)
@@ -807,22 +763,13 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
*/
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
- struct request *req = cmd->request;
-
- /*
- * Free up any indirection buffers we allocated for DMA purposes.
- */
if (cmd->use_sg)
scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
- else if (cmd->request_buffer != req->buffer)
- kfree(cmd->request_buffer);
/*
* Zero these out. They now point to freed memory, and it is
* dangerous to hang onto the pointers.
*/
- cmd->buffer = NULL;
- cmd->bufflen = 0;
cmd->request_buffer = NULL;
cmd->request_bufflen = 0;
}
@@ -858,7 +805,7 @@ static void scsi_release_buffers(struct scsi_cmnd *cmd)
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- int this_count = cmd->bufflen;
+ int this_count = cmd->request_bufflen;
request_queue_t *q = cmd->device->request_queue;
struct request *req = cmd->request;
int clear_errors = 1;
@@ -866,28 +813,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
int sense_valid = 0;
int sense_deferred = 0;
- /*
- * Free up any indirection buffers we allocated for DMA purposes.
- * For the case of a READ, we need to copy the data out of the
- * bounce buffer and into the real buffer.
- */
- if (cmd->use_sg)
- scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
- else if (cmd->buffer != req->buffer) {
- if (rq_data_dir(req) == READ) {
- unsigned long flags;
- char *to = bio_kmap_irq(req->bio, &flags);
- memcpy(to, cmd->buffer, cmd->bufflen);
- bio_kunmap_irq(to, &flags);
- }
- kfree(cmd->buffer);
- }
+ scsi_release_buffers(cmd);
if (result) {
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
if (sense_valid)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
+
if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
req->errors = result;
if (result) {
@@ -908,15 +841,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
/*
- * Zero these out. They now point to freed memory, and it is
- * dangerous to hang onto the pointers.
- */
- cmd->buffer = NULL;
- cmd->bufflen = 0;
- cmd->request_buffer = NULL;
- cmd->request_bufflen = 0;
-
- /*
* Next deal with any sectors which we were able to correctly
* handle.
*/
@@ -1012,7 +936,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (!(req->flags & REQ_QUIET)) {
scmd_printk(KERN_INFO, cmd,
"Volume overflow, CDB: ");
- __scsi_print_command(cmd->data_cmnd);
+ __scsi_print_command(cmd->cmnd);
scsi_print_sense("", cmd);
}
/* See SSC3rXX or current. */
@@ -1143,7 +1067,7 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
* successfully. Since this is a REQ_BLOCK_PC command the
* caller should check the request's errors value
*/
- scsi_io_completion(cmd, cmd->bufflen);
+ scsi_io_completion(cmd, cmd->request_bufflen);
}
static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index e2fbe9a9d5a9..ae24c85aaeea 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -57,7 +57,6 @@ extern int scsi_eh_scmd_add(struct scsi_cmnd *, int);
/* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
-extern void scsi_setup_cmd_retry(struct scsi_cmnd *cmd);
extern void scsi_device_unbusy(struct scsi_device *sdev);
extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_next_command(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7b9e8fa1a4e0..2ecd14188574 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,6 +34,7 @@
#define ISCSI_SESSION_ATTRS 11
#define ISCSI_CONN_ATTRS 11
#define ISCSI_HOST_ATTRS 0
+#define ISCSI_TRANSPORT_VERSION "1.1-646"
struct iscsi_internal {
int daemon_pid;
@@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone)
}
static int
-iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb)
+iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp)
{
unsigned long flags;
int rc;
skb_get(skb);
- rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL);
+ rc = netlink_broadcast(nls, skb, 0, 1, gfp);
if (rc < 0) {
mempool_free(skb, zone->pool);
printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
@@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
ev->r.connerror.cid = conn->cid;
ev->r.connerror.sid = iscsi_conn_get_sid(conn);
- iscsi_broadcast_skb(conn->z_error, skb);
+ iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC);
dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
error);
@@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
* this will occur if the daemon is not up, so we just warn
* the user and when the daemon is restarted it will handle it
*/
- rc = iscsi_broadcast_skb(conn->z_pdu, skb);
+ rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
if (rc < 0)
dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
"session destruction event. Check iscsi daemon\n");
@@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
* this will occur if the daemon is not up, so we just warn
* the user and when the daemon is restarted it will handle it
*/
- rc = iscsi_broadcast_skb(conn->z_pdu, skb);
+ rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
if (rc < 0)
dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
"session creation event. Check iscsi daemon\n");
@@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void)
{
int err;
+ printk(KERN_INFO "Loading iSCSI transport class v%s.",
+ ISCSI_TRANSPORT_VERSION);
+
err = class_register(&iscsi_transport_class);
if (err)
return err;
@@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
"Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI Transport Interface");
MODULE_LICENSE("GPL");
+MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index dd075627e605..5a625c3fddae 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -41,6 +41,7 @@ struct sas_host_attrs {
struct mutex lock;
u32 next_target_id;
u32 next_expander_id;
+ int next_port_id;
};
#define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data)
@@ -146,6 +147,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
mutex_init(&sas_host->lock);
sas_host->next_target_id = 0;
sas_host->next_expander_id = 0;
+ sas_host->next_port_id = 0;
return 0;
}
@@ -327,7 +329,7 @@ sas_phy_protocol_attr(identify.target_port_protocols,
sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
unsigned long long);
sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
-//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", u8);
+//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
sas_phy_linkspeed_attr(negotiated_linkrate);
sas_phy_linkspeed_attr(minimum_linkrate_hw);
sas_phy_linkspeed_attr(minimum_linkrate);
@@ -590,6 +592,38 @@ struct sas_port *sas_port_alloc(struct device *parent, int port_id)
}
EXPORT_SYMBOL(sas_port_alloc);
+/** sas_port_alloc_num - allocate and initialize a SAS port structure
+ *
+ * @parent: parent device
+ *
+ * Allocates a SAS port structure and a number to go with it. This
+ * interface is really for adapters where the port numbers have no
+ * meaning, so the sas class should manage them. It will be added to
+ * the device tree below the device specified by @parent which must be
+ * either a Scsi_Host or a sas_expander_device.
+ *
+ * Returns %NULL on error
+ */
+struct sas_port *sas_port_alloc_num(struct device *parent)
+{
+ int index;
+ struct Scsi_Host *shost = dev_to_shost(parent);
+ struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+
+ /* FIXME: use idr for this eventually */
+ mutex_lock(&sas_host->lock);
+ if (scsi_is_sas_expander_device(parent)) {
+ struct sas_rphy *rphy = dev_to_rphy(parent);
+ struct sas_expander_device *exp = rphy_to_expander_device(rphy);
+
+ index = exp->next_port_id++;
+ } else
+ index = sas_host->next_port_id++;
+ mutex_unlock(&sas_host->lock);
+ return sas_port_alloc(parent, index);
+}
+EXPORT_SYMBOL(sas_port_alloc_num);
+
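sas_port_alloc_num() above lets the transport class hand out port numbers for adapters that have none of their own. A minimal usage sketch follows, assuming the rest of the SAS transport API (sas_port_add, sas_port_add_phy, sas_port_free) as it exists elsewhere in this tree; attach_phy_example is a hypothetical helper, not part of the patch, and error handling is abbreviated.

#include <scsi/scsi_transport_sas.h>

/* Hypothetical discovery helper: put @phy on a freshly numbered port
 * below @parent (a Scsi_Host or expander device). */
static struct sas_port *attach_phy_example(struct device *parent,
					   struct sas_phy *phy)
{
	struct sas_port *port;

	port = sas_port_alloc_num(parent);	/* class picks the number */
	if (!port)
		return NULL;
	if (sas_port_add(port)) {		/* register with sysfs */
		sas_port_free(port);
		return NULL;
	}
	sas_port_add_phy(port, phy);		/* attach the phy */
	return port;
}

On teardown, sas_port_delete_phy() and sas_port_delete() reverse the two steps.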
/**
* sas_port_add - add a SAS port to the device hierarchy
*
@@ -658,6 +692,13 @@ void sas_port_delete(struct sas_port *port)
}
mutex_unlock(&port->phy_list_mutex);
+ if (port->is_backlink) {
+ struct device *parent = port->dev.parent;
+
+ sysfs_remove_link(&port->dev.kobj, parent->bus_id);
+ port->is_backlink = 0;
+ }
+
transport_remove_device(dev);
device_del(dev);
transport_destroy_device(dev);
@@ -733,6 +774,19 @@ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
}
EXPORT_SYMBOL(sas_port_delete_phy);
+void sas_port_mark_backlink(struct sas_port *port)
+{
+ struct device *parent = port->dev.parent->parent->parent;
+
+ if (port->is_backlink)
+ return;
+ port->is_backlink = 1;
+ sysfs_create_link(&port->dev.kobj, &parent->kobj,
+ parent->bus_id);
+
+}
+EXPORT_SYMBOL(sas_port_mark_backlink);
+
/*
* SAS remote PHY attributes.
*/
@@ -1140,7 +1194,7 @@ int sas_rphy_add(struct sas_rphy *rphy)
if (identify->device_type == SAS_END_DEVICE &&
rphy->scsi_target_id != -1) {
- scsi_scan_target(&rphy->dev, parent->port_identifier,
+ scsi_scan_target(&rphy->dev, 0,
rphy->scsi_target_id, ~0, 0);
}
@@ -1242,15 +1296,13 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
mutex_lock(&sas_host->lock);
list_for_each_entry(rphy, &sas_host->rphy_list, list) {
- struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
-
if (rphy->identify.device_type != SAS_END_DEVICE ||
rphy->scsi_target_id == -1)
continue;
- if ((channel == SCAN_WILD_CARD || channel == parent->port_identifier) &&
+ if ((channel == SCAN_WILD_CARD || channel == 0) &&
(id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
- scsi_scan_target(&rphy->dev, parent->port_identifier,
+ scsi_scan_target(&rphy->dev, 0,
rphy->scsi_target_id, lun, 1);
}
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3225d31449e1..98bd3aab9739 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -502,8 +502,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
SCpnt->cmnd[4] = (unsigned char) this_count;
SCpnt->cmnd[5] = 0;
}
- SCpnt->request_bufflen = SCpnt->bufflen =
- this_count * sdp->sector_size;
+ SCpnt->request_bufflen = this_count * sdp->sector_size;
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index 3f312a84c6a7..2679ea8bff1a 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -1002,7 +1002,7 @@ connect_loop:
}
#endif
- buffer = (struct scatterlist *) SCint->buffer;
+ buffer = (struct scatterlist *) SCint->request_buffer;
len = buffer->length;
data = page_address(buffer->page) + buffer->offset;
} else {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 65eef33846bb..34f9343ed0af 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -18,8 +18,8 @@
*
*/
-static int sg_version_num = 30533; /* 2 digits for each component */
-#define SG_VERSION_STR "3.5.33"
+static int sg_version_num = 30534; /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.34"
/*
* D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
-static char *sg_version_date = "20050908";
+static char *sg_version_date = "20060818";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
@@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
len = vma->vm_end - sa;
len = (len < sg->length) ? len : sg->length;
if (offset < len) {
- page = sg->page;
+ page = virt_to_page(page_address(sg->page) + offset);
get_page(page); /* increment page count */
break;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index fd94408577e5..fae6e95a6298 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -360,7 +360,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
"mismatch count %d, bytes %d\n",
size, SCpnt->request_bufflen);
if (SCpnt->request_bufflen > size)
- SCpnt->request_bufflen = SCpnt->bufflen = size;
+ SCpnt->request_bufflen = size;
}
}
@@ -387,8 +387,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
if (this_count > 0xffff) {
this_count = 0xffff;
- SCpnt->request_bufflen = SCpnt->bufflen =
- this_count * s_size;
+ SCpnt->request_bufflen = this_count * s_size;
}
SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 756ceb93ddc8..7f669b600677 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -368,7 +368,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
if (cmdstatp->have_sense)
- __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
+ __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
} ) /* end DEB */
if (!debugging) { /* Abnormal conditions for tape */
if (!cmdstatp->have_sense)
@@ -384,9 +384,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
scode != VOLUME_OVERFLOW &&
SRpnt->cmd[0] != MODE_SENSE &&
SRpnt->cmd[0] != TEST_UNIT_READY) {
- printk(KERN_WARNING "%s: Error with sense data: ", name);
- __scsi_print_sense("st", SRpnt->sense,
- SCSI_SENSE_BUFFERSIZE);
+
+ __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
}
}
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 2ebe0d663899..2f8073b73bf3 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -517,7 +517,7 @@ static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
*/
if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+ cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 1f328cae5c05..6b60536ac92b 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -347,7 +347,7 @@ static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
int sz = sp->use_sg - 1;
- struct scatterlist *sg = (struct scatterlist *)sp->buffer;
+ struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
while(sz >= 0) {
dvma_unmap((char *)sg[sz].dma_address);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 8c505076c0eb..739d3ef46a40 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = {
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 680f38ab60d8..2083454db511 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -373,7 +373,7 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd,
*/
if (cmd->use_sg) {
- cmd->SCp.buffer = (struct scatterlist *) cmd->buffer;
+ cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
cmd->SCp.buffer->offset;