222 files changed, 8448 insertions, 14783 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp index b9688de8455b..7049a2b50359 100644 --- a/Documentation/ABI/stable/sysfs-driver-ib_srp +++ b/Documentation/ABI/stable/sysfs-driver-ib_srp @@ -55,12 +55,12 @@ Description: Interface for making ib_srp connect to a new target. only safe with partial memory descriptor list support enabled (allow_ext_sg=1). * comp_vector, a number in the range 0..n-1 specifying the - MSI-X completion vector. Some HCA's allocate multiple (n) - MSI-X vectors per HCA port. If the IRQ affinity masks of - these interrupts have been configured such that each MSI-X - interrupt is handled by a different CPU then the comp_vector - parameter can be used to spread the SRP completion workload - over multiple CPU's. + MSI-X completion vector of the first RDMA channel. Some + HCA's allocate multiple (n) MSI-X vectors per HCA port. If + the IRQ affinity masks of these interrupts have been + configured such that each MSI-X interrupt is handled by a + different CPU then the comp_vector parameter can be used to + spread the SRP completion workload over multiple CPU's. * tl_retry_count, a number in the range 2..7 specifying the IB RC retry count. * queue_size, the maximum number of commands that the @@ -88,6 +88,13 @@ Description: Whether ib_srp is allowed to include a partial memory descriptor list in an SRP_CMD when communicating with an SRP target. +What: /sys/class/scsi_host/host<n>/ch_count +Date: April 1, 2015 +KernelVersion: 3.19 +Contact: linux-rdma@vger.kernel.org +Description: Number of RDMA channels used for communication with the SRP + target. + What: /sys/class/scsi_host/host<n>/cmd_sg_entries Date: May 19, 2011 KernelVersion: 2.6.39 @@ -95,6 +102,12 @@ Contact: linux-rdma@vger.kernel.org Description: Maximum number of data buffer descriptors that may be sent to the target in a single SRP_CMD request. +What: /sys/class/scsi_host/host<n>/comp_vector +Date: September 2, 2013 +KernelVersion: 3.11 +Contact: linux-rdma@vger.kernel.org +Description: Completion vector used for the first RDMA channel. + What: /sys/class/scsi_host/host<n>/dgid Date: June 17, 2006 KernelVersion: 2.6.17 diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 2101e718670d..6b972b287795 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -827,10 +827,6 @@ but in the event of any barrier requests in the tag queue we need to ensure that requests are restarted in the order they were queue. This may happen if the driver needs to use blk_queue_invalidate_tags(). -Tagging also defines a new request flag, REQ_QUEUED. This is set whenever -a request is currently tagged. You should not use this flag directly, -blk_rq_tagged(rq) is the portable way to do so. - 3.3 I/O Submission The routine submit_bio() is used to submit a single io. 
Higher level i/o diff --git a/Documentation/scsi/libsas.txt b/Documentation/scsi/libsas.txt index 3cc9c7843e15..8cac6492aade 100644 --- a/Documentation/scsi/libsas.txt +++ b/Documentation/scsi/libsas.txt @@ -226,9 +226,6 @@ static int register_sas_ha(struct my_sas_ha *my_ha) my_ha->sas_ha.lldd_dev_found = my_dev_found; my_ha->sas_ha.lldd_dev_gone = my_dev_gone; - my_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num; (1) - - my_ha->sas_ha.lldd_queue_size = ha_can_queue; my_ha->sas_ha.lldd_execute_task = my_execute_task; my_ha->sas_ha.lldd_abort_task = my_abort_task; @@ -247,28 +244,6 @@ static int register_sas_ha(struct my_sas_ha *my_ha) return sas_register_ha(&my_ha->sas_ha); } -(1) This is normally a LLDD parameter, something of the -lines of a task collector. What it tells the SAS Layer is -whether the SAS layer should run in Direct Mode (default: -value 0 or 1) or Task Collector Mode (value greater than 1). - -In Direct Mode, the SAS Layer calls Execute Task as soon as -it has a command to send to the SDS, _and_ this is a single -command, i.e. not linked. - -Some hardware (e.g. aic94xx) has the capability to DMA more -than one task at a time (interrupt) from host memory. Task -Collector Mode is an optional feature for HAs which support -this in their hardware. (Again, it is completely optional -even if your hardware supports it.) - -In Task Collector Mode, the SAS Layer would do _natural_ -coalescing of tasks and at the appropriate moment it would -call your driver to DMA more than one task in a single HA -interrupt. DMBS may want to use this by insmod/modprobe -setting the lldd_max_execute_num to something greater than -1. - (2) SAS 1.1 does not define I_T Nexus Reset TMF. Events @@ -325,71 +300,22 @@ PHYE_SPINUP_HOLD -- SATA is present, COMWAKE not sent. The Execute Command SCSI RPC: - int (*lldd_execute_task)(struct sas_task *, int num, - unsigned long gfp_flags); + int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags); -Used to queue a task to the SAS LLDD. @task is the tasks to -be executed. @num should be the number of tasks being -queued at this function call (they are linked listed via -task::list), @gfp_mask should be the gfp_mask defining the -context of the caller. +Used to queue a task to the SAS LLDD. @task is the task to be executed. +@gfp_mask is the gfp_mask defining the context of the caller. This function should implement the Execute Command SCSI RPC, -or if you're sending a SCSI Task as linked commands, you -should also use this function. -That is, when lldd_execute_task() is called, the command(s) +That is, when lldd_execute_task() is called, the command go out on the transport *immediately*. There is *no* queuing of any sort and at any level in a SAS LLDD. -The use of task::list is two-fold, one for linked commands, -the other discussed below. - -It is possible to queue up more than one task at a time, by -initializing the list element of struct sas_task, and -passing the number of tasks enlisted in this manner in num. - Returns: -SAS_QUEUE_FULL, -ENOMEM, nothing was queued; 0, the task(s) were queued. -If you want to pass num > 1, then either -A) you're the only caller of this function and keep track - of what you've queued to the LLDD, or -B) you know what you're doing and have a strategy of - retrying. - -As opposed to queuing one task at a time (function call), -batch queuing of tasks, by having num > 1, greatly -simplifies LLDD code, sequencer code, and _hardware design_, -and has some performance advantages in certain situations -(DBMS). 
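For illustration only (this sketch is not part of the patch): under the new single-task prototype shown in the libsas.txt hunk above, an LLDD's Execute Command handler reduces to posting the one task immediately or failing. The my_* names and struct my_hba below are hypothetical placeholders in the spirit of the my_execute_task example earlier in libsas.txt; the return values follow the Returns description above.

static int my_execute_task(struct sas_task *task, gfp_t gfp_flags)
{
	struct my_hba *hba = task->dev->port->ha->lldd_ha;

	/* No queuing at any level: the command either goes out on the
	 * transport now or the call fails and the caller retries.
	 */
	if (!my_hw_slot_available(hba))
		return -SAS_QUEUE_FULL;

	my_hw_post_task(hba, task, gfp_flags);
	return 0;
}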
- -The LLDD advertises if it can take more than one command at -a time at lldd_execute_task(), by setting the -lldd_max_execute_num parameter (controlled by "collector" -module parameter in aic94xx SAS LLDD). - -You should leave this to the default 1, unless you know what -you're doing. - -This is a function of the LLDD, to which the SAS layer can -cater to. - -int lldd_queue_size - The host adapter's queue size. This is the maximum -number of commands the lldd can have pending to domain -devices on behalf of all upper layers submitting through -lldd_execute_task(). - -You really want to set this to something (much) larger than -1. - -This _really_ has absolutely nothing to do with queuing. -There is no queuing in SAS LLDDs. - struct sas_task { dev -- the device this task is destined to - list -- must be initialized (INIT_LIST_HEAD) task_proto -- _one_ of enum sas_proto scatter -- pointer to scatter gather list array num_scatter -- number of elements in scatter diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt index d6a9bdeee7f2..731bc4f4c5e6 100644 --- a/Documentation/scsi/scsi_mid_low_api.txt +++ b/Documentation/scsi/scsi_mid_low_api.txt @@ -149,7 +149,7 @@ scsi_add_host() ----> scsi_scan_host() -------+ | slave_alloc() - slave_configure() --> scsi_adjust_queue_depth() + slave_configure() --> scsi_change_queue_depth() | slave_alloc() slave_configure() @@ -159,7 +159,7 @@ scsi_scan_host() -------+ ------------------------------------------------------------ If the LLD wants to adjust the default queue settings, it can invoke -scsi_adjust_queue_depth() in its slave_configure() routine. +scsi_change_queue_depth() in its slave_configure() routine. *** For scsi devices that the mid level tries to scan but do not respond, a slave_alloc(), slave_destroy() pair is called. @@ -203,7 +203,7 @@ LLD mid level LLD scsi_add_device() ------+ | slave_alloc() - slave_configure() [--> scsi_adjust_queue_depth()] + slave_configure() [--> scsi_change_queue_depth()] ------------------------------------------------------------ In a similar fashion, an LLD may become aware that a SCSI device has been @@ -261,7 +261,7 @@ init_this_scsi_driver() ----+ | scsi_register() | slave_alloc() - slave_configure() --> scsi_adjust_queue_depth() + slave_configure() --> scsi_change_queue_depth() slave_alloc() *** slave_destroy() *** | @@ -271,9 +271,9 @@ init_this_scsi_driver() ----+ slave_destroy() *** ------------------------------------------------------------ -The mid level invokes scsi_adjust_queue_depth() with tagged queuing off and -"cmd_per_lun" for that host as the queue length. These settings can be -overridden by a slave_configure() supplied by the LLD. +The mid level invokes scsi_change_queue_depth() with "cmd_per_lun" for that +host as the queue length. These settings can be overridden by a +slave_configure() supplied by the LLD. *** For scsi devices that the mid level tries to scan but do not respond, a slave_alloc(), slave_destroy() pair is called. @@ -366,13 +366,11 @@ is initialized. The functions below are listed alphabetically and their names all start with "scsi_". 
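For illustration only (this sketch is not from the patch): the scsi_mid_low_api.txt flowchart changes above boil down to an LLD making a single scsi_change_queue_depth() call from its slave_configure(), with no tag-message argument. my_slave_configure() and MY_CAN_QUEUE are hypothetical placeholders.

static int my_slave_configure(struct scsi_device *sdev)
{
	/* Replaces the old scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, n);
	 * the return value is the queue depth that was actually set.
	 */
	scsi_change_queue_depth(sdev, MY_CAN_QUEUE);
	return 0;
}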
Summary: - scsi_activate_tcq - turn on tag command queueing scsi_add_device - creates new scsi device (lu) instance scsi_add_host - perform sysfs registration and set up transport class - scsi_adjust_queue_depth - change the queue depth on a SCSI device + scsi_change_queue_depth - change the queue depth on a SCSI device scsi_bios_ptable - return copy of block device's partition table scsi_block_requests - prevent further commands being queued to given host - scsi_deactivate_tcq - turn off tag command queueing scsi_host_alloc - return a new scsi_host instance whose refcount==1 scsi_host_get - increments Scsi_Host instance's refcount scsi_host_put - decrements Scsi_Host instance's refcount (free if 0) @@ -390,24 +388,6 @@ Summary: Details: /** - * scsi_activate_tcq - turn on tag command queueing ("ordered" task attribute) - * @sdev: device to turn on TCQ for - * @depth: queue depth - * - * Returns nothing - * - * Might block: no - * - * Notes: Eventually, it is hoped depth would be the maximum depth - * the device could cope with and the real queue depth - * would be adjustable from 0 to depth. - * - * Defined (inline) in: include/scsi/scsi_tcq.h - **/ -void scsi_activate_tcq(struct scsi_device *sdev, int depth) - - -/** * scsi_add_device - creates new scsi device (lu) instance * @shost: pointer to scsi host instance * @channel: channel number (rarely other than 0) @@ -456,11 +436,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev) /** - * scsi_adjust_queue_depth - allow LLD to change queue depth on a SCSI device + * scsi_change_queue_depth - allow LLD to change queue depth on a SCSI device * @sdev: pointer to SCSI device to change queue depth on - * @tagged: 0 - no tagged queuing - * MSG_SIMPLE_TAG - simple tagged queuing - * MSG_ORDERED_TAG - ordered tagged queuing * @tags Number of tags allowed if tagged queuing enabled, * or number of commands the LLD can queue up * in non-tagged mode (as per cmd_per_lun). @@ -471,15 +448,12 @@ int scsi_add_host(struct Scsi_Host *shost, struct device * dev) * * Notes: Can be invoked any time on a SCSI device controlled by this * LLD. [Specifically during and after slave_configure() and prior to - * slave_destroy().] Can safely be invoked from interrupt code. Actual - * queue depth change may be delayed until the next command is being - * processed. See also scsi_activate_tcq() and scsi_deactivate_tcq(). + * slave_destroy().] Can safely be invoked from interrupt code. * * Defined in: drivers/scsi/scsi.c [see source code for more notes] * **/ -void scsi_adjust_queue_depth(struct scsi_device * sdev, int tagged, - int tags) +int scsi_change_queue_depth(struct scsi_device *sdev, int tags) /** @@ -515,20 +489,6 @@ void scsi_block_requests(struct Scsi_Host * shost) /** - * scsi_deactivate_tcq - turn off tag command queueing - * @sdev: device to turn off TCQ for - * @depth: queue depth (stored in sdev) - * - * Returns nothing - * - * Might block: no - * - * Defined (inline) in: include/scsi/scsi_tcq.h - **/ -void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) - - -/** * scsi_host_alloc - create a scsi host adapter instance and perform basic * initialization. * @sht: pointer to scsi host template @@ -1254,7 +1214,7 @@ of interest: for disk firmware uploads. cmd_per_lun - maximum number of commands that can be queued on devices controlled by the host. Overridden by LLD calls to - scsi_adjust_queue_depth(). + scsi_change_queue_depth(). 
unchecked_isa_dma - 1=>only use bottom 16 MB of ram (ISA DMA addressing restriction), 0=>can use full 32 bit (or better) DMA address space @@ -1294,7 +1254,7 @@ struct scsi_cmnd Instances of this structure convey SCSI commands to the LLD and responses back to the mid level. The SCSI mid level will ensure that no more SCSI commands become queued against the LLD than are indicated by -scsi_adjust_queue_depth() (or struct Scsi_Host::cmd_per_lun). There will +scsi_change_queue_depth() (or struct Scsi_Host::cmd_per_lun). There will be at least one instance of struct scsi_cmnd available for each SCSI device. Members of interest: cmnd - array containing SCSI command diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt index f346abbdd6ff..0d5bdb153d3b 100644 --- a/Documentation/scsi/st.txt +++ b/Documentation/scsi/st.txt @@ -506,9 +506,11 @@ user does not request data that far.) DEBUGGING HINTS -To enable debugging messages, edit st.c and #define DEBUG 1. As seen -above, debugging can be switched off with an ioctl if debugging is -compiled into the driver. The debugging output is not voluminous. +Debugging code is now compiled in by default but debugging is turned off +with the kernel module parameter debug_flag defaulting to 0. Debugging +can still be switched on and off with an ioctl. To enable debug at +module load time add debug_flag=1 to the module load options. The +debugging output is not voluminous. If the tape seems to hang, I would be very interested to hear where the driver is waiting. With the command 'ps -l' you can see the state diff --git a/Documentation/scsi/wd719x.txt b/Documentation/scsi/wd719x.txt new file mode 100644 index 000000000000..0816b0220238 --- /dev/null +++ b/Documentation/scsi/wd719x.txt @@ -0,0 +1,21 @@ +Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards +--------------------------------------------------------------- + +The card requires firmware that can be cut out of the Windows NT driver that +can be downloaded from WD at: +http://support.wdc.com/product/download.asp?groupid=801&sid=27&lang=en + +There is no license anywhere in the file or on the page - so the firmware +probably cannot be added to linux-firmware. + +This script downloads and extracts the firmware, creating wd719x-risc.bin and +wd719x-wcs.bin files. Put them in /lib/firmware/. 
+ +#!/bin/sh +wget http://support.wdc.com/download/archive/pciscsi.exe +lha xi pciscsi.exe pci-scsi.exe +lha xi pci-scsi.exe nt/wd7296a.sys +rm pci-scsi.exe +dd if=wd7296a.sys of=wd719x-risc.bin bs=1 skip=5760 count=14336 +dd if=wd7296a.sys of=wd719x-wcs.bin bs=1 skip=20096 count=514 +rm wd7296a.sys diff --git a/MAINTAINERS b/MAINTAINERS index c721042e7e45..8ef028d3afc7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2862,11 +2862,10 @@ F: Documentation/networking/dmfe.txt F: drivers/net/ethernet/dec/tulip/dmfe.c DC390/AM53C974 SCSI driver -M: Kurt Garloff <garloff@suse.de> -W: http://www.garloff.de/kurt/linux/dc390/ -M: Guennadi Liakhovetski <g.liakhovetski@gmx.de> +M: Hannes Reinecke <hare@suse.de> +L: linux-scsi@vger.kernel.org S: Maintained -F: drivers/scsi/tmscsim.* +F: drivers/scsi/am53c974.c DC395x SCSI driver M: Oliver Neukum <oliver@neukum.org> @@ -5991,10 +5990,13 @@ W: http://linuxtv.org S: Odd Fixes F: drivers/media/parport/pms* -MEGARAID SCSI DRIVERS -M: Neela Syam Kolli <megaraidlinux@lsi.com> +MEGARAID SCSI/SAS DRIVERS +M: Kashyap Desai <kashyap.desai@avagotech.com> +M: Sumit Saxena <sumit.saxena@avagotech.com> +M: Uday Lingala <uday.lingala@avagotech.com> +L: megaraidlinux.pdl@avagotech.com L: linux-scsi@vger.kernel.org -W: http://megaraid.lsilogic.com +W: http://www.lsi.com S: Maintained F: Documentation/scsi/megaraid.txt F: drivers/scsi/megaraid.* @@ -6305,7 +6307,6 @@ F: drivers/scsi/g_NCR5380.* F: drivers/scsi/g_NCR5380_mmio.c F: drivers/scsi/mac_scsi.* F: drivers/scsi/pas16.* -F: drivers/scsi/sun3_NCR5380.c F: drivers/scsi/sun3_scsi.* F: drivers/scsi/sun3_scsi_vme.c F: drivers/scsi/t128.* diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index 01a62161b08a..192b00f098f4 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -858,6 +858,24 @@ static struct platform_device *atari_netusbee_devices[] __initdata = { }; #endif /* CONFIG_ATARI_ETHERNEC */ +#ifdef CONFIG_ATARI_SCSI +static const struct resource atari_scsi_st_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = IRQ_MFP_FSCSI, + .end = IRQ_MFP_FSCSI, + }, +}; + +static const struct resource atari_scsi_tt_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = IRQ_TT_MFP_SCSI, + .end = IRQ_TT_MFP_SCSI, + }, +}; +#endif + int __init atari_platform_init(void) { int rv = 0; @@ -892,6 +910,15 @@ int __init atari_platform_init(void) } #endif +#ifdef CONFIG_ATARI_SCSI + if (ATARIHW_PRESENT(ST_SCSI)) + platform_device_register_simple("atari_scsi", -1, + atari_scsi_st_rsrc, ARRAY_SIZE(atari_scsi_st_rsrc)); + else if (ATARIHW_PRESENT(TT_SCSI)) + platform_device_register_simple("atari_scsi", -1, + atari_scsi_tt_rsrc, ARRAY_SIZE(atari_scsi_tt_rsrc)); +#endif + return rv; } diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c index ddbf43ca8858..e5a66596b116 100644 --- a/arch/m68k/atari/stdma.c +++ b/arch/m68k/atari/stdma.c @@ -59,6 +59,31 @@ static irqreturn_t stdma_int (int irq, void *dummy); /************************* End of Prototypes **************************/ +/** + * stdma_try_lock - attempt to acquire ST DMA interrupt "lock" + * @handler: interrupt handler to use after acquisition + * + * Returns !0 if lock was acquired; otherwise 0. 
+ */ + +int stdma_try_lock(irq_handler_t handler, void *data) +{ + unsigned long flags; + + local_irq_save(flags); + if (stdma_locked) { + local_irq_restore(flags); + return 0; + } + + stdma_locked = 1; + stdma_isr = handler; + stdma_isr_data = data; + local_irq_restore(flags); + return 1; +} +EXPORT_SYMBOL(stdma_try_lock); + /* * Function: void stdma_lock( isrfunc isr, void *data ) @@ -78,19 +103,10 @@ static irqreturn_t stdma_int (int irq, void *dummy); void stdma_lock(irq_handler_t handler, void *data) { - unsigned long flags; - - local_irq_save(flags); /* protect lock */ - /* Since the DMA is used for file system purposes, we have to sleep uninterruptible (there may be locked buffers) */ - wait_event(stdma_wait, !stdma_locked); - - stdma_locked = 1; - stdma_isr = handler; - stdma_isr_data = data; - local_irq_restore(flags); + wait_event(stdma_wait, stdma_try_lock(handler, data)); } EXPORT_SYMBOL(stdma_lock); @@ -122,22 +138,25 @@ void stdma_release(void) EXPORT_SYMBOL(stdma_release); -/* - * Function: int stdma_others_waiting( void ) - * - * Purpose: Check if someone waits for the ST-DMA lock. - * - * Inputs: none - * - * Returns: 0 if no one is waiting, != 0 otherwise +/** + * stdma_is_locked_by - allow lock holder to check whether it needs to release. + * @handler: interrupt handler previously used to acquire lock. * + * Returns !0 if locked for the given handler; 0 otherwise. */ -int stdma_others_waiting(void) +int stdma_is_locked_by(irq_handler_t handler) { - return waitqueue_active(&stdma_wait); + unsigned long flags; + int result; + + local_irq_save(flags); + result = stdma_locked && (stdma_isr == handler); + local_irq_restore(flags); + + return result; } -EXPORT_SYMBOL(stdma_others_waiting); +EXPORT_SYMBOL(stdma_is_locked_by); /* diff --git a/arch/m68k/include/asm/atari_stdma.h b/arch/m68k/include/asm/atari_stdma.h index 8e389b7fa70c..d24e34d870dc 100644 --- a/arch/m68k/include/asm/atari_stdma.h +++ b/arch/m68k/include/asm/atari_stdma.h @@ -8,11 +8,11 @@ /***************************** Prototypes *****************************/ +int stdma_try_lock(irq_handler_t, void *); void stdma_lock(irq_handler_t handler, void *data); void stdma_release( void ); -int stdma_others_waiting( void ); int stdma_islocked( void ); -void *stdma_locked_by( void ); +int stdma_is_locked_by(irq_handler_t); void stdma_init( void ); /************************* End of Prototypes **************************/ diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index d323b2c2d07d..29c7c6c3a5f2 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -53,6 +53,10 @@ struct mac_model #define MAC_SCSI_QUADRA 2 #define MAC_SCSI_QUADRA2 3 #define MAC_SCSI_QUADRA3 4 +#define MAC_SCSI_IIFX 5 +#define MAC_SCSI_DUO 6 +#define MAC_SCSI_CCL 7 +#define MAC_SCSI_LATE 8 #define MAC_IDE_NONE 0 #define MAC_IDE_QUADRA 1 diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index a471eab1a4dd..e9c3756139fc 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -278,7 +278,7 @@ static struct mac_model mac_data_table[] = { .name = "IIfx", .adb_type = MAC_ADB_IOP, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_IIFX, .scc_type = MAC_SCC_IOP, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_IOP, @@ -329,7 +329,7 @@ static struct mac_model mac_data_table[] = { .name = "Color Classic", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_CCL, .scc_type = MAC_SCC_II, 
.nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -338,7 +338,7 @@ static struct mac_model mac_data_table[] = { .name = "Color Classic II", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_CCL, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -526,7 +526,7 @@ static struct mac_model mac_data_table[] = { .name = "Performa 520", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_CCL, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -535,7 +535,7 @@ static struct mac_model mac_data_table[] = { .name = "Performa 550", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_CCL, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -567,7 +567,7 @@ static struct mac_model mac_data_table[] = { .name = "TV", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_CCL, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -712,7 +712,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook 190", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_QUADRA, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_LATE, .ide_type = MAC_IDE_BABOON, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, @@ -722,7 +722,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook 520", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_QUADRA, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_LATE, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, @@ -740,7 +740,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook Duo 210", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -749,7 +749,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook Duo 230", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -758,7 +758,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook Duo 250", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -767,7 +767,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook Duo 270c", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -776,7 +776,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook Duo 280", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -785,7 +785,7 @@ static struct mac_model mac_data_table[] = { .name = "PowerBook Duo 280c", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, - .scsi_type = MAC_SCSI_OLD, + .scsi_type = MAC_SCSI_DUO, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, @@ -929,6 +929,70 
@@ static struct platform_device swim_pdev = { .resource = &swim_rsrc, }; +static const struct resource mac_scsi_iifx_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = IRQ_MAC_SCSI, + .end = IRQ_MAC_SCSI, + }, { + .flags = IORESOURCE_MEM, + .start = 0x50008000, + .end = 0x50009FFF, + }, +}; + +static const struct resource mac_scsi_duo_rsrc[] __initconst = { + { + .flags = IORESOURCE_MEM, + .start = 0xFEE02000, + .end = 0xFEE03FFF, + }, +}; + +static const struct resource mac_scsi_old_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = IRQ_MAC_SCSI, + .end = IRQ_MAC_SCSI, + }, { + .flags = IORESOURCE_MEM, + .start = 0x50010000, + .end = 0x50011FFF, + }, { + .flags = IORESOURCE_MEM, + .start = 0x50006000, + .end = 0x50007FFF, + }, +}; + +static const struct resource mac_scsi_late_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = IRQ_MAC_SCSI, + .end = IRQ_MAC_SCSI, + }, { + .flags = IORESOURCE_MEM, + .start = 0x50010000, + .end = 0x50011FFF, + }, +}; + +static const struct resource mac_scsi_ccl_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = IRQ_MAC_SCSI, + .end = IRQ_MAC_SCSI, + }, { + .flags = IORESOURCE_MEM, + .start = 0x50F10000, + .end = 0x50F11FFF, + }, { + .flags = IORESOURCE_MEM, + .start = 0x50F06000, + .end = 0x50F07FFF, + }, +}; + static struct platform_device esp_0_pdev = { .name = "mac_esp", .id = 0, @@ -1000,6 +1064,60 @@ int __init mac_platform_init(void) (macintosh_config->ident == MAC_MODEL_Q950)) platform_device_register(&esp_1_pdev); break; + case MAC_SCSI_IIFX: + /* Addresses from The Guide to Mac Family Hardware. + * $5000 8000 - $5000 9FFF: SCSI DMA + * $5000 C000 - $5000 DFFF: Alternate SCSI (DMA) + * $5000 E000 - $5000 FFFF: Alternate SCSI (Hsk) + * The SCSI DMA custom IC embeds the 53C80 core. mac_scsi does + * not make use of its DMA or hardware handshaking logic. + */ + platform_device_register_simple("mac_scsi", 0, + mac_scsi_iifx_rsrc, ARRAY_SIZE(mac_scsi_iifx_rsrc)); + break; + case MAC_SCSI_DUO: + /* Addresses from the Duo Dock II Developer Note. + * $FEE0 2000 - $FEE0 3FFF: normal mode + * $FEE0 4000 - $FEE0 5FFF: pseudo DMA without /DRQ + * $FEE0 6000 - $FEE0 7FFF: pseudo DMA with /DRQ + * The NetBSD code indicates that both 5380 chips share + * an IRQ (?) which would need careful handling (see mac_esp). + */ + platform_device_register_simple("mac_scsi", 1, + mac_scsi_duo_rsrc, ARRAY_SIZE(mac_scsi_duo_rsrc)); + /* fall through */ + case MAC_SCSI_OLD: + /* Addresses from Developer Notes for Duo System, + * PowerBook 180 & 160, 140 & 170, Macintosh IIsi + * and also from The Guide to Mac Family Hardware for + * SE/30, II, IIx, IIcx, IIci. + * $5000 6000 - $5000 7FFF: pseudo-DMA with /DRQ + * $5001 0000 - $5001 1FFF: normal mode + * $5001 2000 - $5001 3FFF: pseudo-DMA without /DRQ + * GMFH says that $5000 0000 - $50FF FFFF "wraps + * $5000 0000 - $5001 FFFF eight times" (!) + * mess.org says IIci and Color Classic do not alias + * I/O address space. + */ + platform_device_register_simple("mac_scsi", 0, + mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc)); + break; + case MAC_SCSI_LATE: + /* PDMA logic in 68040 PowerBooks is somehow different to + * '030 models. It's probably more like Quadras (see mac_esp). + */ + platform_device_register_simple("mac_scsi", 0, + mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc)); + break; + case MAC_SCSI_CCL: + /* Addresses from the Color Classic Developer Note. 
+ * $50F0 6000 - $50F0 7FFF: SCSI handshake + * $50F1 0000 - $50F1 1FFF: SCSI + * $50F1 2000 - $50F1 3FFF: SCSI DMA + */ + platform_device_register_simple("mac_scsi", 0, + mac_scsi_ccl_rsrc, ARRAY_SIZE(mac_scsi_ccl_rsrc)); + break; } /* diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c index f59ec58083f8..a8b942bf7163 100644 --- a/arch/m68k/sun3/config.c +++ b/arch/m68k/sun3/config.c @@ -16,6 +16,7 @@ #include <linux/console.h> #include <linux/init.h> #include <linux/bootmem.h> +#include <linux/platform_device.h> #include <asm/oplib.h> #include <asm/setup.h> @@ -27,6 +28,7 @@ #include <asm/sun3mmu.h> #include <asm/rtc.h> #include <asm/machdep.h> +#include <asm/machines.h> #include <asm/idprom.h> #include <asm/intersil.h> #include <asm/irq.h> @@ -169,3 +171,61 @@ static void __init sun3_sched_init(irq_handler_t timer_routine) intersil_clear(); } +#ifdef CONFIG_SUN3_SCSI + +static const struct resource sun3_scsi_vme_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = SUN3_VEC_VMESCSI0, + .end = SUN3_VEC_VMESCSI0, + }, { + .flags = IORESOURCE_MEM, + .start = 0xff200000, + .end = 0xff200021, + }, { + .flags = IORESOURCE_IRQ, + .start = SUN3_VEC_VMESCSI1, + .end = SUN3_VEC_VMESCSI1, + }, { + .flags = IORESOURCE_MEM, + .start = 0xff204000, + .end = 0xff204021, + }, +}; + +/* + * Int: level 2 autovector + * IO: type 1, base 0x00140000, 5 bits phys space: A<4..0> + */ +static const struct resource sun3_scsi_rsrc[] __initconst = { + { + .flags = IORESOURCE_IRQ, + .start = 2, + .end = 2, + }, { + .flags = IORESOURCE_MEM, + .start = 0x00140000, + .end = 0x0014001f, + }, +}; + +int __init sun3_platform_init(void) +{ + switch (idprom->id_machtype) { + case SM_SUN3 | SM_3_160: + case SM_SUN3 | SM_3_260: + platform_device_register_simple("sun3_scsi_vme", -1, + sun3_scsi_vme_rsrc, ARRAY_SIZE(sun3_scsi_vme_rsrc)); + break; + case SM_SUN3 | SM_3_50: + case SM_SUN3 | SM_3_60: + platform_device_register_simple("sun3_scsi", -1, + sun3_scsi_rsrc, ARRAY_SIZE(sun3_scsi_rsrc)); + break; + } + return 0; +} + +arch_initcall(sun3_platform_init); + +#endif diff --git a/block/blk-core.c b/block/blk-core.c index 0421b53e6431..2e7424b42947 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1266,7 +1266,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq) blk_clear_rq_complete(rq); trace_block_rq_requeue(q, rq); - if (blk_rq_tagged(rq)) + if (rq->cmd_flags & REQ_QUEUED) blk_queue_end_tag(q, rq); BUG_ON(blk_queued_rq(rq)); @@ -2554,7 +2554,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); */ void blk_finish_request(struct request *req, int error) { - if (blk_rq_tagged(req)) + if (req->cmd_flags & REQ_QUEUED) blk_queue_end_tag(req->q, req); BUG_ON(blk_queued_rq(req)); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 8317175a3009..728b9a4d5f56 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -584,6 +584,34 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) return 0; } +/** + * blk_mq_unique_tag() - return a tag that is unique queue-wide + * @rq: request for which to compute a unique tag + * + * The tag field in struct request is unique per hardware queue but not over + * all hardware queues. Hence this function that returns a tag with the + * hardware context index in the upper bits and the per hardware queue tag in + * the lower bits. + * + * Note: When called for a request that is queued on a non-multiqueue request + * queue, the hardware context index is set to zero. 
+ */ +u32 blk_mq_unique_tag(struct request *rq) +{ + struct request_queue *q = rq->q; + struct blk_mq_hw_ctx *hctx; + int hwq = 0; + + if (q->mq_ops) { + hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); + hwq = hctx->queue_num; + } + + return (hwq << BLK_MQ_UNIQUE_TAG_BITS) | + (rq->tag & BLK_MQ_UNIQUE_TAG_MASK); +} +EXPORT_SYMBOL(blk_mq_unique_tag); + ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; diff --git a/block/blk-mq.c b/block/blk-mq.c index 1d016fc9a8b6..92ceef0d2ab9 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2049,6 +2049,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) */ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) { + BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); + if (!set->nr_hw_queues) return -EINVAL; if (!set->queue_depth) diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index b0c2a616c8f9..28163fad3c5d 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -142,7 +142,7 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) __set_bit(GPCMD_VERIFY_10, filter->read_ok); __set_bit(VERIFY_16, filter->read_ok); __set_bit(REPORT_LUNS, filter->read_ok); - __set_bit(SERVICE_ACTION_IN, filter->read_ok); + __set_bit(SERVICE_ACTION_IN_16, filter->read_ok); __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok); __set_bit(MAINTENANCE_IN, filter->read_ok); __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 0586f66d70fa..dd45c6a03e5d 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1164,7 +1164,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); depth = min(ATA_MAX_QUEUE - 1, depth); - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); + scsi_change_queue_depth(sdev, depth); } blk_queue_flush_queueable(q, false); @@ -1243,21 +1243,17 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) * @ap: ATA port to which the device change the queue depth * @sdev: SCSI device to configure queue depth for * @queue_depth: new queue depth - * @reason: calling context * * libsas and libata have different approaches for associating a sdev to * its ata_port. * */ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, - int queue_depth, int reason) + int queue_depth) { struct ata_device *dev; unsigned long flags; - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (queue_depth < 1 || queue_depth == sdev->queue_depth) return sdev->queue_depth; @@ -1282,15 +1278,13 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, if (sdev->queue_depth == queue_depth) return -EINVAL; - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth); - return queue_depth; + return scsi_change_queue_depth(sdev, queue_depth); } /** * ata_scsi_change_queue_depth - SCSI callback for queue depth config * @sdev: SCSI device to configure queue depth for * @queue_depth: new queue depth - * @reason: calling context * * This is libata standard hostt->change_queue_depth callback. * SCSI will call into this callback when user tries to set queue @@ -1302,12 +1296,11 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, * RETURNS: * Newly configured queue depth. 
*/ -int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth, - int reason) +int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) { struct ata_port *ap = ata_shost_to_port(sdev->host); - return __ata_change_queue_depth(ap, sdev, queue_depth, reason); + return __ata_change_queue_depth(ap, sdev, queue_depth); } /** @@ -3570,7 +3563,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); break; - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); else diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index cdf99fac139a..1db6f5ce5e89 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -1951,7 +1951,7 @@ static int nv_swncq_slave_config(struct scsi_device *sdev) ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (strncmp(model_num, "Maxtor", 6) == 0) { - ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT); + ata_scsi_change_queue_depth(sdev, 1); ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth); } diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index a4cd6d691c63..0b4b2775600e 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -329,7 +329,7 @@ INQUIRY_EVPD_BIT_MASK) ? 1 : 0) (GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET)) #define IS_READ_CAP_16(cdb) \ -((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0) +((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0) /* Request Sense Helper Macros */ #define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \ @@ -2947,7 +2947,7 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) case READ_CAPACITY: retcode = nvme_trans_read_capacity(ns, hdr, cmd); break; - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: if (IS_READ_CAP_16(cmd)) retcode = nvme_trans_read_capacity(ns, hdr, cmd); else diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index f42ab14105ac..20ca6a619476 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -911,7 +911,7 @@ static struct scsi_host_template iscsi_iser_sht = { .module = THIS_MODULE, .name = "iSCSI Initiator over iSER", .queuecommand = iscsi_queuecommand, - .change_queue_depth = iscsi_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, .max_sectors = 1024, .cmd_per_lun = ISER_DEF_CMD_PER_LUN, @@ -922,6 +922,7 @@ static struct scsi_host_template iscsi_iser_sht = { .use_clustering = DISABLE_CLUSTERING, .proc_name = "iscsi_iser", .this_id = -1, + .track_queue_depth = 1, }; static struct iscsi_transport iscsi_iser_transport = { diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 62d2a18e1b41..5461924c9f10 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -123,10 +123,15 @@ MODULE_PARM_DESC(dev_loss_tmo, " if fast_io_fail_tmo has not been set. \"off\" means that" " this functionality is disabled."); +static unsigned ch_count; +module_param(ch_count, uint, 0444); +MODULE_PARM_DESC(ch_count, + "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. 
The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA."); + static void srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device); -static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); -static void srp_send_completion(struct ib_cq *cq, void *target_ptr); +static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr); +static void srp_send_completion(struct ib_cq *cq, void *ch_ptr); static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); static struct scsi_transport_template *ib_srp_transport_template; @@ -262,7 +267,7 @@ static int srp_init_qp(struct srp_target_port *target, ret = ib_find_pkey(target->srp_host->srp_dev->dev, target->srp_host->port, - be16_to_cpu(target->path.pkey), + be16_to_cpu(target->pkey), &attr->pkey_index); if (ret) goto out; @@ -283,18 +288,23 @@ out: return ret; } -static int srp_new_cm_id(struct srp_target_port *target) +static int srp_new_cm_id(struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; struct ib_cm_id *new_cm_id; new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, - srp_cm_handler, target); + srp_cm_handler, ch); if (IS_ERR(new_cm_id)) return PTR_ERR(new_cm_id); - if (target->cm_id) - ib_destroy_cm_id(target->cm_id); - target->cm_id = new_cm_id; + if (ch->cm_id) + ib_destroy_cm_id(ch->cm_id); + ch->cm_id = new_cm_id; + ch->path.sgid = target->sgid; + ch->path.dgid = target->orig_dgid; + ch->path.pkey = target->pkey; + ch->path.service_id = target->service_id; return 0; } @@ -443,8 +453,44 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target) dev->max_pages_per_mr); } -static int srp_create_target_ib(struct srp_target_port *target) +/** + * srp_destroy_qp() - destroy an RDMA queue pair + * @ch: SRP RDMA channel. + * + * Change a queue pair into the error state and wait until all receive + * completions have been processed before destroying it. This avoids that + * the receive completion handler can access the queue pair while it is + * being destroyed. 
+ */ +static void srp_destroy_qp(struct srp_rdma_ch *ch) +{ + struct srp_target_port *target = ch->target; + static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; + static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID }; + struct ib_recv_wr *bad_wr; + int ret; + + /* Destroying a QP and reusing ch->done is only safe if not connected */ + WARN_ON_ONCE(target->connected); + + ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); + WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret); + if (ret) + goto out; + + init_completion(&ch->done); + ret = ib_post_recv(ch->qp, &wr, &bad_wr); + WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret); + if (ret == 0) + wait_for_completion(&ch->done); + +out: + ib_destroy_qp(ch->qp); +} + +static int srp_create_ch_ib(struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_qp_init_attr *init_attr; struct ib_cq *recv_cq, *send_cq; @@ -458,15 +504,16 @@ static int srp_create_target_ib(struct srp_target_port *target) if (!init_attr) return -ENOMEM; - recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, target, - target->queue_size, target->comp_vector); + /* + 1 for SRP_LAST_WR_ID */ + recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, + target->queue_size + 1, ch->comp_vector); if (IS_ERR(recv_cq)) { ret = PTR_ERR(recv_cq); goto err; } - send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, target, - m * target->queue_size, target->comp_vector); + send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, + m * target->queue_size, ch->comp_vector); if (IS_ERR(send_cq)) { ret = PTR_ERR(send_cq); goto err_recv_cq; @@ -476,7 +523,7 @@ static int srp_create_target_ib(struct srp_target_port *target) init_attr->event_handler = srp_qp_event; init_attr->cap.max_send_wr = m * target->queue_size; - init_attr->cap.max_recv_wr = target->queue_size; + init_attr->cap.max_recv_wr = target->queue_size + 1; init_attr->cap.max_recv_sge = 1; init_attr->cap.max_send_sge = 1; init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; @@ -502,9 +549,9 @@ static int srp_create_target_ib(struct srp_target_port *target) "FR pool allocation failed (%d)\n", ret); goto err_qp; } - if (target->fr_pool) - srp_destroy_fr_pool(target->fr_pool); - target->fr_pool = fr_pool; + if (ch->fr_pool) + srp_destroy_fr_pool(ch->fr_pool); + ch->fr_pool = fr_pool; } else if (!dev->use_fast_reg && dev->has_fmr) { fmr_pool = srp_alloc_fmr_pool(target); if (IS_ERR(fmr_pool)) { @@ -513,21 +560,21 @@ static int srp_create_target_ib(struct srp_target_port *target) "FMR pool allocation failed (%d)\n", ret); goto err_qp; } - if (target->fmr_pool) - ib_destroy_fmr_pool(target->fmr_pool); - target->fmr_pool = fmr_pool; + if (ch->fmr_pool) + ib_destroy_fmr_pool(ch->fmr_pool); + ch->fmr_pool = fmr_pool; } - if (target->qp) - ib_destroy_qp(target->qp); - if (target->recv_cq) - ib_destroy_cq(target->recv_cq); - if (target->send_cq) - ib_destroy_cq(target->send_cq); + if (ch->qp) + srp_destroy_qp(ch); + if (ch->recv_cq) + ib_destroy_cq(ch->recv_cq); + if (ch->send_cq) + ib_destroy_cq(ch->send_cq); - target->qp = qp; - target->recv_cq = recv_cq; - target->send_cq = send_cq; + ch->qp = qp; + ch->recv_cq = recv_cq; + ch->send_cq = send_cq; kfree(init_attr); return 0; @@ -548,93 +595,117 @@ err: /* * Note: this function may be called without srp_alloc_iu_bufs() having been - * invoked. Hence the target->[rt]x_ring checks. + * invoked. Hence the ch->[rt]x_ring checks. 
*/ -static void srp_free_target_ib(struct srp_target_port *target) +static void srp_free_ch_ib(struct srp_target_port *target, + struct srp_rdma_ch *ch) { struct srp_device *dev = target->srp_host->srp_dev; int i; + if (!ch->target) + return; + + if (ch->cm_id) { + ib_destroy_cm_id(ch->cm_id); + ch->cm_id = NULL; + } + + /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */ + if (!ch->qp) + return; + if (dev->use_fast_reg) { - if (target->fr_pool) - srp_destroy_fr_pool(target->fr_pool); + if (ch->fr_pool) + srp_destroy_fr_pool(ch->fr_pool); } else { - if (target->fmr_pool) - ib_destroy_fmr_pool(target->fmr_pool); + if (ch->fmr_pool) + ib_destroy_fmr_pool(ch->fmr_pool); } - ib_destroy_qp(target->qp); - ib_destroy_cq(target->send_cq); - ib_destroy_cq(target->recv_cq); + srp_destroy_qp(ch); + ib_destroy_cq(ch->send_cq); + ib_destroy_cq(ch->recv_cq); + + /* + * Avoid that the SCSI error handler tries to use this channel after + * it has been freed. The SCSI error handler can namely continue + * trying to perform recovery actions after scsi_remove_host() + * returned. + */ + ch->target = NULL; - target->qp = NULL; - target->send_cq = target->recv_cq = NULL; + ch->qp = NULL; + ch->send_cq = ch->recv_cq = NULL; - if (target->rx_ring) { + if (ch->rx_ring) { for (i = 0; i < target->queue_size; ++i) - srp_free_iu(target->srp_host, target->rx_ring[i]); - kfree(target->rx_ring); - target->rx_ring = NULL; + srp_free_iu(target->srp_host, ch->rx_ring[i]); + kfree(ch->rx_ring); + ch->rx_ring = NULL; } - if (target->tx_ring) { + if (ch->tx_ring) { for (i = 0; i < target->queue_size; ++i) - srp_free_iu(target->srp_host, target->tx_ring[i]); - kfree(target->tx_ring); - target->tx_ring = NULL; + srp_free_iu(target->srp_host, ch->tx_ring[i]); + kfree(ch->tx_ring); + ch->tx_ring = NULL; } } static void srp_path_rec_completion(int status, struct ib_sa_path_rec *pathrec, - void *target_ptr) + void *ch_ptr) { - struct srp_target_port *target = target_ptr; + struct srp_rdma_ch *ch = ch_ptr; + struct srp_target_port *target = ch->target; - target->status = status; + ch->status = status; if (status) shost_printk(KERN_ERR, target->scsi_host, PFX "Got failed path rec status %d\n", status); else - target->path = *pathrec; - complete(&target->done); + ch->path = *pathrec; + complete(&ch->done); } -static int srp_lookup_path(struct srp_target_port *target) +static int srp_lookup_path(struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; int ret; - target->path.numb_path = 1; - - init_completion(&target->done); - - target->path_query_id = ib_sa_path_rec_get(&srp_sa_client, - target->srp_host->srp_dev->dev, - target->srp_host->port, - &target->path, - IB_SA_PATH_REC_SERVICE_ID | - IB_SA_PATH_REC_DGID | - IB_SA_PATH_REC_SGID | - IB_SA_PATH_REC_NUMB_PATH | - IB_SA_PATH_REC_PKEY, - SRP_PATH_REC_TIMEOUT_MS, - GFP_KERNEL, - srp_path_rec_completion, - target, &target->path_query); - if (target->path_query_id < 0) - return target->path_query_id; - - ret = wait_for_completion_interruptible(&target->done); + ch->path.numb_path = 1; + + init_completion(&ch->done); + + ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client, + target->srp_host->srp_dev->dev, + target->srp_host->port, + &ch->path, + IB_SA_PATH_REC_SERVICE_ID | + IB_SA_PATH_REC_DGID | + IB_SA_PATH_REC_SGID | + IB_SA_PATH_REC_NUMB_PATH | + IB_SA_PATH_REC_PKEY, + SRP_PATH_REC_TIMEOUT_MS, + GFP_KERNEL, + srp_path_rec_completion, + ch, &ch->path_query); + if (ch->path_query_id < 0) + return ch->path_query_id; + + ret = 
wait_for_completion_interruptible(&ch->done); if (ret < 0) return ret; - if (target->status < 0) + if (ch->status < 0) shost_printk(KERN_WARNING, target->scsi_host, PFX "Path record query failed\n"); - return target->status; + return ch->status; } -static int srp_send_req(struct srp_target_port *target) +static int srp_send_req(struct srp_rdma_ch *ch, bool multich) { + struct srp_target_port *target = ch->target; struct { struct ib_cm_req_param param; struct srp_login_req priv; @@ -645,11 +716,11 @@ static int srp_send_req(struct srp_target_port *target) if (!req) return -ENOMEM; - req->param.primary_path = &target->path; + req->param.primary_path = &ch->path; req->param.alternate_path = NULL; req->param.service_id = target->service_id; - req->param.qp_num = target->qp->qp_num; - req->param.qp_type = target->qp->qp_type; + req->param.qp_num = ch->qp->qp_num; + req->param.qp_type = ch->qp->qp_type; req->param.private_data = &req->priv; req->param.private_data_len = sizeof req->priv; req->param.flow_control = 1; @@ -673,6 +744,8 @@ static int srp_send_req(struct srp_target_port *target) req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len); req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); + req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI : + SRP_MULTICHAN_SINGLE); /* * In the published SRP specification (draft rev. 16a), the * port identifier format is 8 bytes of ID extension followed @@ -684,7 +757,7 @@ static int srp_send_req(struct srp_target_port *target) */ if (target->io_class == SRP_REV10_IB_IO_CLASS) { memcpy(req->priv.initiator_port_id, - &target->path.sgid.global.interface_id, 8); + &target->sgid.global.interface_id, 8); memcpy(req->priv.initiator_port_id + 8, &target->initiator_ext, 8); memcpy(req->priv.target_port_id, &target->ioc_guid, 8); @@ -693,7 +766,7 @@ static int srp_send_req(struct srp_target_port *target) memcpy(req->priv.initiator_port_id, &target->initiator_ext, 8); memcpy(req->priv.initiator_port_id + 8, - &target->path.sgid.global.interface_id, 8); + &target->sgid.global.interface_id, 8); memcpy(req->priv.target_port_id, &target->id_ext, 8); memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8); } @@ -713,7 +786,7 @@ static int srp_send_req(struct srp_target_port *target) &target->srp_host->srp_dev->dev->node_guid, 8); } - status = ib_send_cm_req(target->cm_id, &req->param); + status = ib_send_cm_req(ch->cm_id, &req->param); kfree(req); @@ -754,28 +827,35 @@ static bool srp_change_conn_state(struct srp_target_port *target, static void srp_disconnect_target(struct srp_target_port *target) { + struct srp_rdma_ch *ch; + int i; + if (srp_change_conn_state(target, false)) { /* XXX should send SRP_I_LOGOUT request */ - if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { - shost_printk(KERN_DEBUG, target->scsi_host, - PFX "Sending CM DREQ failed\n"); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { + shost_printk(KERN_DEBUG, target->scsi_host, + PFX "Sending CM DREQ failed\n"); + } } } } -static void srp_free_req_data(struct srp_target_port *target) +static void srp_free_req_data(struct srp_target_port *target, + struct srp_rdma_ch *ch) { struct srp_device *dev = target->srp_host->srp_dev; struct ib_device *ibdev = dev->dev; struct srp_request *req; int i; - if (!target->req_ring) + if (!ch->target || !ch->req_ring) return; for (i = 0; i < target->req_ring_size; ++i) { - req = &target->req_ring[i]; + req = &ch->req_ring[i]; if (dev->use_fast_reg) 
kfree(req->fr_list); else @@ -789,12 +869,13 @@ static void srp_free_req_data(struct srp_target_port *target) kfree(req->indirect_desc); } - kfree(target->req_ring); - target->req_ring = NULL; + kfree(ch->req_ring); + ch->req_ring = NULL; } -static int srp_alloc_req_data(struct srp_target_port *target) +static int srp_alloc_req_data(struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; struct srp_device *srp_dev = target->srp_host->srp_dev; struct ib_device *ibdev = srp_dev->dev; struct srp_request *req; @@ -802,15 +883,13 @@ static int srp_alloc_req_data(struct srp_target_port *target) dma_addr_t dma_addr; int i, ret = -ENOMEM; - INIT_LIST_HEAD(&target->free_reqs); - - target->req_ring = kzalloc(target->req_ring_size * - sizeof(*target->req_ring), GFP_KERNEL); - if (!target->req_ring) + ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), + GFP_KERNEL); + if (!ch->req_ring) goto out; for (i = 0; i < target->req_ring_size; ++i) { - req = &target->req_ring[i]; + req = &ch->req_ring[i]; mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *), GFP_KERNEL); if (!mr_list) @@ -834,8 +913,6 @@ static int srp_alloc_req_data(struct srp_target_port *target) goto out; req->indirect_dma_addr = dma_addr; - req->index = i; - list_add_tail(&req->list, &target->free_reqs); } ret = 0; @@ -860,6 +937,9 @@ static void srp_del_scsi_host_attr(struct Scsi_Host *shost) static void srp_remove_target(struct srp_target_port *target) { + struct srp_rdma_ch *ch; + int i; + WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); srp_del_scsi_host_attr(target->scsi_host); @@ -868,11 +948,18 @@ static void srp_remove_target(struct srp_target_port *target) scsi_remove_host(target->scsi_host); srp_stop_rport_timers(target->rport); srp_disconnect_target(target); - ib_destroy_cm_id(target->cm_id); - srp_free_target_ib(target); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + srp_free_ch_ib(target, ch); + } cancel_work_sync(&target->tl_err_work); srp_rport_put(target->rport); - srp_free_req_data(target); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + srp_free_req_data(target, ch); + } + kfree(target->ch); + target->ch = NULL; spin_lock(&target->srp_host->target_lock); list_del(&target->list); @@ -898,25 +985,25 @@ static void srp_rport_delete(struct srp_rport *rport) srp_queue_remove_work(target); } -static int srp_connect_target(struct srp_target_port *target) +static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich) { - int retries = 3; + struct srp_target_port *target = ch->target; int ret; - WARN_ON_ONCE(target->connected); + WARN_ON_ONCE(!multich && target->connected); target->qp_in_error = false; - ret = srp_lookup_path(target); + ret = srp_lookup_path(ch); if (ret) return ret; while (1) { - init_completion(&target->done); - ret = srp_send_req(target); + init_completion(&ch->done); + ret = srp_send_req(ch, multich); if (ret) return ret; - ret = wait_for_completion_interruptible(&target->done); + ret = wait_for_completion_interruptible(&ch->done); if (ret < 0) return ret; @@ -926,13 +1013,13 @@ static int srp_connect_target(struct srp_target_port *target) * back, or SRP_DLID_REDIRECT if we get a lid/qp * redirect REJ back. 
*/ - switch (target->status) { + switch (ch->status) { case 0: srp_change_conn_state(target, true); return 0; case SRP_PORT_REDIRECT: - ret = srp_lookup_path(target); + ret = srp_lookup_path(ch); if (ret) return ret; break; @@ -941,27 +1028,18 @@ static int srp_connect_target(struct srp_target_port *target) break; case SRP_STALE_CONN: - /* Our current CM id was stale, and is now in timewait. - * Try to reconnect with a new one. - */ - if (!retries-- || srp_new_cm_id(target)) { - shost_printk(KERN_ERR, target->scsi_host, PFX - "giving up on stale connection\n"); - target->status = -ECONNRESET; - return target->status; - } - shost_printk(KERN_ERR, target->scsi_host, PFX - "retrying stale connection\n"); - break; + "giving up on stale connection\n"); + ch->status = -ECONNRESET; + return ch->status; default: - return target->status; + return ch->status; } } } -static int srp_inv_rkey(struct srp_target_port *target, u32 rkey) +static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey) { struct ib_send_wr *bad_wr; struct ib_send_wr wr = { @@ -973,13 +1051,14 @@ static int srp_inv_rkey(struct srp_target_port *target, u32 rkey) .ex.invalidate_rkey = rkey, }; - return ib_post_send(target->qp, &wr, &bad_wr); + return ib_post_send(ch->qp, &wr, &bad_wr); } static void srp_unmap_data(struct scsi_cmnd *scmnd, - struct srp_target_port *target, + struct srp_rdma_ch *ch, struct srp_request *req) { + struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_device *ibdev = dev->dev; int i, res; @@ -993,7 +1072,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, struct srp_fr_desc **pfr; for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { - res = srp_inv_rkey(target, (*pfr)->mr->rkey); + res = srp_inv_rkey(ch, (*pfr)->mr->rkey); if (res < 0) { shost_printk(KERN_ERR, target->scsi_host, PFX "Queueing INV WR for rkey %#x failed (%d)\n", @@ -1003,7 +1082,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, } } if (req->nmdesc) - srp_fr_pool_put(target->fr_pool, req->fr_list, + srp_fr_pool_put(ch->fr_pool, req->fr_list, req->nmdesc); } else { struct ib_pool_fmr **pfmr; @@ -1018,7 +1097,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, /** * srp_claim_req - Take ownership of the scmnd associated with a request. - * @target: SRP target port. + * @ch: SRP RDMA channel. * @req: SRP request. * @sdev: If not NULL, only take ownership for this SCSI device. * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take @@ -1027,14 +1106,14 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, * Return value: * Either NULL or a pointer to the SCSI command the caller became owner of. */ -static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, +static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch, struct srp_request *req, struct scsi_device *sdev, struct scsi_cmnd *scmnd) { unsigned long flags; - spin_lock_irqsave(&target->lock, flags); + spin_lock_irqsave(&ch->lock, flags); if (req->scmnd && (!sdev || req->scmnd->device == sdev) && (!scmnd || req->scmnd == scmnd)) { @@ -1043,40 +1122,37 @@ static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, } else { scmnd = NULL; } - spin_unlock_irqrestore(&target->lock, flags); + spin_unlock_irqrestore(&ch->lock, flags); return scmnd; } /** * srp_free_req() - Unmap data and add request to the free request list. - * @target: SRP target port. + * @ch: SRP RDMA channel. * @req: Request to be freed. * @scmnd: SCSI command associated with @req. 
* @req_lim_delta: Amount to be added to @target->req_lim. */ -static void srp_free_req(struct srp_target_port *target, - struct srp_request *req, struct scsi_cmnd *scmnd, - s32 req_lim_delta) +static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req, + struct scsi_cmnd *scmnd, s32 req_lim_delta) { unsigned long flags; - srp_unmap_data(scmnd, target, req); + srp_unmap_data(scmnd, ch, req); - spin_lock_irqsave(&target->lock, flags); - target->req_lim += req_lim_delta; - list_add_tail(&req->list, &target->free_reqs); - spin_unlock_irqrestore(&target->lock, flags); + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += req_lim_delta; + spin_unlock_irqrestore(&ch->lock, flags); } -static void srp_finish_req(struct srp_target_port *target, - struct srp_request *req, struct scsi_device *sdev, - int result) +static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, + struct scsi_device *sdev, int result) { - struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL); + struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL); if (scmnd) { - srp_free_req(target, req, scmnd, 0); + srp_free_req(ch, req, scmnd, 0); scmnd->result = result; scmnd->scsi_done(scmnd); } @@ -1085,9 +1161,10 @@ static void srp_finish_req(struct srp_target_port *target, static void srp_terminate_io(struct srp_rport *rport) { struct srp_target_port *target = rport->lld_data; + struct srp_rdma_ch *ch; struct Scsi_Host *shost = target->scsi_host; struct scsi_device *sdev; - int i; + int i, j; /* * Invoking srp_terminate_io() while srp_queuecommand() is running @@ -1096,9 +1173,15 @@ static void srp_terminate_io(struct srp_rport *rport) shost_for_each_device(sdev, shost) WARN_ON_ONCE(sdev->request_queue->request_fn_active); - for (i = 0; i < target->req_ring_size; ++i) { - struct srp_request *req = &target->req_ring[i]; - srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + + for (j = 0; j < target->req_ring_size; ++j) { + struct srp_request *req = &ch->req_ring[j]; + + srp_finish_req(ch, req, NULL, + DID_TRANSPORT_FAILFAST << 16); + } } } @@ -1114,34 +1197,61 @@ static void srp_terminate_io(struct srp_rport *rport) static int srp_rport_reconnect(struct srp_rport *rport) { struct srp_target_port *target = rport->lld_data; - int i, ret; + struct srp_rdma_ch *ch; + int i, j, ret = 0; + bool multich = false; srp_disconnect_target(target); + + if (target->state == SRP_TARGET_SCANNING) + return -ENODEV; + /* * Now get a new local CM ID so that we avoid confusing the target in * case things are really fouled up. Doing so also ensures that all CM * callbacks will have finished before a new QP is allocated. */ - ret = srp_new_cm_id(target); - - for (i = 0; i < target->req_ring_size; ++i) { - struct srp_request *req = &target->req_ring[i]; - srp_finish_req(target, req, NULL, DID_RESET << 16); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + if (!ch->target) + break; + ret += srp_new_cm_id(ch); } + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + if (!ch->target) + break; + for (j = 0; j < target->req_ring_size; ++j) { + struct srp_request *req = &ch->req_ring[j]; - /* - * Whether or not creating a new CM ID succeeded, create a new - * QP. This guarantees that all callback functions for the old QP have - * finished before any send requests are posted on the new QP. 
- */ - ret += srp_create_target_ib(target); - - INIT_LIST_HEAD(&target->free_tx); - for (i = 0; i < target->queue_size; ++i) - list_add(&target->tx_ring[i]->list, &target->free_tx); + srp_finish_req(ch, req, NULL, DID_RESET << 16); + } + } + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + if (!ch->target) + break; + /* + * Whether or not creating a new CM ID succeeded, create a new + * QP. This guarantees that all completion callback function + * invocations have finished before request resetting starts. + */ + ret += srp_create_ch_ib(ch); - if (ret == 0) - ret = srp_connect_target(target); + INIT_LIST_HEAD(&ch->free_tx); + for (j = 0; j < target->queue_size; ++j) + list_add(&ch->tx_ring[j]->list, &ch->free_tx); + } + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + if (ret || !ch->target) { + if (i > 1) + ret = 0; + break; + } + ret = srp_connect_ch(ch, multich); + multich = true; + } if (ret == 0) shost_printk(KERN_INFO, target->scsi_host, @@ -1165,12 +1275,12 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr, } static int srp_map_finish_fmr(struct srp_map_state *state, - struct srp_target_port *target) + struct srp_rdma_ch *ch) { struct ib_pool_fmr *fmr; u64 io_addr = 0; - fmr = ib_fmr_pool_map_phys(target->fmr_pool, state->pages, + fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages, state->npages, io_addr); if (IS_ERR(fmr)) return PTR_ERR(fmr); @@ -1184,15 +1294,16 @@ static int srp_map_finish_fmr(struct srp_map_state *state, } static int srp_map_finish_fr(struct srp_map_state *state, - struct srp_target_port *target) + struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_send_wr *bad_wr; struct ib_send_wr wr; struct srp_fr_desc *desc; u32 rkey; - desc = srp_fr_pool_get(target->fr_pool); + desc = srp_fr_pool_get(ch->fr_pool); if (!desc) return -ENOMEM; @@ -1221,12 +1332,13 @@ static int srp_map_finish_fr(struct srp_map_state *state, srp_map_desc(state, state->base_dma_addr, state->dma_len, desc->mr->rkey); - return ib_post_send(target->qp, &wr, &bad_wr); + return ib_post_send(ch->qp, &wr, &bad_wr); } static int srp_finish_mapping(struct srp_map_state *state, - struct srp_target_port *target) + struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; int ret = 0; if (state->npages == 0) @@ -1237,8 +1349,8 @@ static int srp_finish_mapping(struct srp_map_state *state, target->rkey); else ret = target->srp_host->srp_dev->use_fast_reg ? 
- srp_map_finish_fr(state, target) : - srp_map_finish_fmr(state, target); + srp_map_finish_fr(state, ch) : + srp_map_finish_fmr(state, ch); if (ret == 0) { state->npages = 0; @@ -1258,10 +1370,11 @@ static void srp_map_update_start(struct srp_map_state *state, } static int srp_map_sg_entry(struct srp_map_state *state, - struct srp_target_port *target, + struct srp_rdma_ch *ch, struct scatterlist *sg, int sg_index, bool use_mr) { + struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_device *ibdev = dev->dev; dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg); @@ -1290,7 +1403,7 @@ static int srp_map_sg_entry(struct srp_map_state *state, */ if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) || dma_len > dev->mr_max_size) { - ret = srp_finish_mapping(state, target); + ret = srp_finish_mapping(state, ch); if (ret) return ret; @@ -1311,7 +1424,7 @@ static int srp_map_sg_entry(struct srp_map_state *state, while (dma_len) { unsigned offset = dma_addr & ~dev->mr_page_mask; if (state->npages == dev->max_pages_per_mr || offset != 0) { - ret = srp_finish_mapping(state, target); + ret = srp_finish_mapping(state, ch); if (ret) return ret; @@ -1335,17 +1448,18 @@ static int srp_map_sg_entry(struct srp_map_state *state, */ ret = 0; if (len != dev->mr_page_size) { - ret = srp_finish_mapping(state, target); + ret = srp_finish_mapping(state, ch); if (!ret) srp_map_update_start(state, NULL, 0, 0); } return ret; } -static int srp_map_sg(struct srp_map_state *state, - struct srp_target_port *target, struct srp_request *req, - struct scatterlist *scat, int count) +static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch, + struct srp_request *req, struct scatterlist *scat, + int count) { + struct srp_target_port *target = ch->target; struct srp_device *dev = target->srp_host->srp_dev; struct ib_device *ibdev = dev->dev; struct scatterlist *sg; @@ -1356,14 +1470,14 @@ static int srp_map_sg(struct srp_map_state *state, state->pages = req->map_page; if (dev->use_fast_reg) { state->next_fr = req->fr_list; - use_mr = !!target->fr_pool; + use_mr = !!ch->fr_pool; } else { state->next_fmr = req->fmr_list; - use_mr = !!target->fmr_pool; + use_mr = !!ch->fmr_pool; } for_each_sg(scat, sg, count, i) { - if (srp_map_sg_entry(state, target, sg, i, use_mr)) { + if (srp_map_sg_entry(state, ch, sg, i, use_mr)) { /* * Memory registration failed, so backtrack to the * first unmapped entry and continue on without using @@ -1385,7 +1499,7 @@ backtrack: } } - if (use_mr && srp_finish_mapping(state, target)) + if (use_mr && srp_finish_mapping(state, ch)) goto backtrack; req->nmdesc = state->nmdesc; @@ -1393,9 +1507,10 @@ backtrack: return 0; } -static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, +static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, struct srp_request *req) { + struct srp_target_port *target = ch->target; struct scatterlist *scat; struct srp_cmd *cmd = req->cmd->buf; int len, nents, count; @@ -1457,7 +1572,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, target->indirect_size, DMA_TO_DEVICE); memset(&state, 0, sizeof(state)); - srp_map_sg(&state, target, req, scat, count); + srp_map_sg(&state, ch, req, scat, count); /* We've mapped the request, now pull as much of the indirect * descriptor table as we can into the command buffer. 
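[Editor's note] srp_map_sg_entry() and srp_finish_mapping() above accumulate page addresses and flush them into a memory region when the per-MR page limit is reached (or the start address is unaligned, a case left out here). The following toy userspace model shows only that accumulate/flush pattern; the page size, MR limit, and the printf standing in for an FMR/fast-registration work request are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		 4096u
#define MAX_PAGES_PER_MR 4	/* invented limit for the example */

struct map_state {
	uint64_t pages[MAX_PAGES_PER_MR];
	unsigned int npages;
	unsigned int nmdesc;
};

/* "Register" the accumulated pages and start a new batch. */
static void finish_mapping(struct map_state *st)
{
	if (!st->npages)
		return;
	printf("register MR #%u with %u page(s) starting at 0x%llx\n",
	       st->nmdesc, st->npages, (unsigned long long)st->pages[0]);
	st->nmdesc++;
	st->npages = 0;
}

/* Split a DMA range into pages, flushing whenever the batch is full. */
static void map_range(struct map_state *st, uint64_t dma_addr, unsigned int len)
{
	while (len) {
		if (st->npages == MAX_PAGES_PER_MR)
			finish_mapping(st);
		st->pages[st->npages++] = dma_addr & ~(uint64_t)(PAGE_SZ - 1);
		dma_addr += PAGE_SZ;
		len -= len < PAGE_SZ ? len : PAGE_SZ;
	}
}

int main(void)
{
	struct map_state st = { .npages = 0 };

	map_range(&st, 0x10000, 6 * PAGE_SZ);	/* spans two MRs */
	finish_mapping(&st);
	return 0;
}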
If this @@ -1518,20 +1633,20 @@ map_complete: /* * Return an IU and possible credit to the free pool */ -static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu, +static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu, enum srp_iu_type iu_type) { unsigned long flags; - spin_lock_irqsave(&target->lock, flags); - list_add(&iu->list, &target->free_tx); + spin_lock_irqsave(&ch->lock, flags); + list_add(&iu->list, &ch->free_tx); if (iu_type != SRP_IU_RSP) - ++target->req_lim; - spin_unlock_irqrestore(&target->lock, flags); + ++ch->req_lim; + spin_unlock_irqrestore(&ch->lock, flags); } /* - * Must be called with target->lock held to protect req_lim and free_tx. + * Must be called with ch->lock held to protect req_lim and free_tx. * If IU is not sent, it must be returned using srp_put_tx_iu(). * * Note: @@ -1543,35 +1658,36 @@ static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu, * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than * one unanswered SRP request to an initiator. */ -static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, +static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch, enum srp_iu_type iu_type) { + struct srp_target_port *target = ch->target; s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; struct srp_iu *iu; - srp_send_completion(target->send_cq, target); + srp_send_completion(ch->send_cq, ch); - if (list_empty(&target->free_tx)) + if (list_empty(&ch->free_tx)) return NULL; /* Initiator responses to target requests do not consume credits */ if (iu_type != SRP_IU_RSP) { - if (target->req_lim <= rsv) { + if (ch->req_lim <= rsv) { ++target->zero_req_lim; return NULL; } - --target->req_lim; + --ch->req_lim; } - iu = list_first_entry(&target->free_tx, struct srp_iu, list); + iu = list_first_entry(&ch->free_tx, struct srp_iu, list); list_del(&iu->list); return iu; } -static int srp_post_send(struct srp_target_port *target, - struct srp_iu *iu, int len) +static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len) { + struct srp_target_port *target = ch->target; struct ib_sge list; struct ib_send_wr wr, *bad_wr; @@ -1586,11 +1702,12 @@ static int srp_post_send(struct srp_target_port *target, wr.opcode = IB_WR_SEND; wr.send_flags = IB_SEND_SIGNALED; - return ib_post_send(target->qp, &wr, &bad_wr); + return ib_post_send(ch->qp, &wr, &bad_wr); } -static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu) +static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu) { + struct srp_target_port *target = ch->target; struct ib_recv_wr wr, *bad_wr; struct ib_sge list; @@ -1603,35 +1720,39 @@ static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu) wr.sg_list = &list; wr.num_sge = 1; - return ib_post_recv(target->qp, &wr, &bad_wr); + return ib_post_recv(ch->qp, &wr, &bad_wr); } -static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) +static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) { + struct srp_target_port *target = ch->target; struct srp_request *req; struct scsi_cmnd *scmnd; unsigned long flags; if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { - spin_lock_irqsave(&target->lock, flags); - target->req_lim += be32_to_cpu(rsp->req_lim_delta); - spin_unlock_irqrestore(&target->lock, flags); + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&ch->lock, flags); - target->tsk_mgmt_status = -1; + ch->tsk_mgmt_status = 
-1; if (be32_to_cpu(rsp->resp_data_len) >= 4) - target->tsk_mgmt_status = rsp->data[3]; - complete(&target->tsk_mgmt_done); + ch->tsk_mgmt_status = rsp->data[3]; + complete(&ch->tsk_mgmt_done); } else { - req = &target->req_ring[rsp->tag]; - scmnd = srp_claim_req(target, req, NULL, NULL); + scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); + if (scmnd) { + req = (void *)scmnd->host_scribble; + scmnd = srp_claim_req(ch, req, NULL, scmnd); + } if (!scmnd) { shost_printk(KERN_ERR, target->scsi_host, - "Null scmnd for RSP w/tag %016llx\n", - (unsigned long long) rsp->tag); + "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n", + rsp->tag, ch - target->ch, ch->qp->qp_num); - spin_lock_irqsave(&target->lock, flags); - target->req_lim += be32_to_cpu(rsp->req_lim_delta); - spin_unlock_irqrestore(&target->lock, flags); + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&ch->lock, flags); return; } @@ -1653,7 +1774,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); - srp_free_req(target, req, scmnd, + srp_free_req(ch, req, scmnd, be32_to_cpu(rsp->req_lim_delta)); scmnd->host_scribble = NULL; @@ -1661,18 +1782,19 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) } } -static int srp_response_common(struct srp_target_port *target, s32 req_delta, +static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta, void *rsp, int len) { + struct srp_target_port *target = ch->target; struct ib_device *dev = target->srp_host->srp_dev->dev; unsigned long flags; struct srp_iu *iu; int err; - spin_lock_irqsave(&target->lock, flags); - target->req_lim += req_delta; - iu = __srp_get_tx_iu(target, SRP_IU_RSP); - spin_unlock_irqrestore(&target->lock, flags); + spin_lock_irqsave(&ch->lock, flags); + ch->req_lim += req_delta; + iu = __srp_get_tx_iu(ch, SRP_IU_RSP); + spin_unlock_irqrestore(&ch->lock, flags); if (!iu) { shost_printk(KERN_ERR, target->scsi_host, PFX @@ -1684,17 +1806,17 @@ static int srp_response_common(struct srp_target_port *target, s32 req_delta, memcpy(iu->buf, rsp, len); ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); - err = srp_post_send(target, iu, len); + err = srp_post_send(ch, iu, len); if (err) { shost_printk(KERN_ERR, target->scsi_host, PFX "unable to post response: %d\n", err); - srp_put_tx_iu(target, iu, SRP_IU_RSP); + srp_put_tx_iu(ch, iu, SRP_IU_RSP); } return err; } -static void srp_process_cred_req(struct srp_target_port *target, +static void srp_process_cred_req(struct srp_rdma_ch *ch, struct srp_cred_req *req) { struct srp_cred_rsp rsp = { @@ -1703,14 +1825,15 @@ static void srp_process_cred_req(struct srp_target_port *target, }; s32 delta = be32_to_cpu(req->req_lim_delta); - if (srp_response_common(target, delta, &rsp, sizeof rsp)) - shost_printk(KERN_ERR, target->scsi_host, PFX + if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) + shost_printk(KERN_ERR, ch->target->scsi_host, PFX "problems processing SRP_CRED_REQ\n"); } -static void srp_process_aer_req(struct srp_target_port *target, +static void srp_process_aer_req(struct srp_rdma_ch *ch, struct srp_aer_req *req) { + struct srp_target_port *target = ch->target; struct srp_aer_rsp rsp = { .opcode = SRP_AER_RSP, .tag = req->tag, @@ -1720,19 +1843,20 @@ static void srp_process_aer_req(struct srp_target_port *target, shost_printk(KERN_ERR, 
target->scsi_host, PFX "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); - if (srp_response_common(target, delta, &rsp, sizeof rsp)) + if (srp_response_common(ch, delta, &rsp, sizeof(rsp))) shost_printk(KERN_ERR, target->scsi_host, PFX "problems processing SRP_AER_REQ\n"); } -static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) +static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc) { + struct srp_target_port *target = ch->target; struct ib_device *dev = target->srp_host->srp_dev->dev; struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; int res; u8 opcode; - ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, + ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, DMA_FROM_DEVICE); opcode = *(u8 *) iu->buf; @@ -1746,15 +1870,15 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) switch (opcode) { case SRP_RSP: - srp_process_rsp(target, iu->buf); + srp_process_rsp(ch, iu->buf); break; case SRP_CRED_REQ: - srp_process_cred_req(target, iu->buf); + srp_process_cred_req(ch, iu->buf); break; case SRP_AER_REQ: - srp_process_aer_req(target, iu->buf); + srp_process_aer_req(ch, iu->buf); break; case SRP_T_LOGOUT: @@ -1769,10 +1893,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) break; } - ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, + ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, DMA_FROM_DEVICE); - res = srp_post_recv(target, iu); + res = srp_post_recv(ch, iu); if (res != 0) shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed with error code %d\n", res); @@ -1795,8 +1919,15 @@ static void srp_tl_err_work(struct work_struct *work) } static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, - bool send_err, struct srp_target_port *target) + bool send_err, struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; + + if (wr_id == SRP_LAST_WR_ID) { + complete(&ch->done); + return; + } + if (target->connected && !target->qp_in_error) { if (wr_id & LOCAL_INV_WR_ID_MASK) { shost_printk(KERN_ERR, target->scsi_host, PFX @@ -1817,33 +1948,33 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status, target->qp_in_error = true; } -static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) +static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr) { - struct srp_target_port *target = target_ptr; + struct srp_rdma_ch *ch = ch_ptr; struct ib_wc wc; ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); while (ib_poll_cq(cq, 1, &wc) > 0) { if (likely(wc.status == IB_WC_SUCCESS)) { - srp_handle_recv(target, &wc); + srp_handle_recv(ch, &wc); } else { - srp_handle_qp_err(wc.wr_id, wc.status, false, target); + srp_handle_qp_err(wc.wr_id, wc.status, false, ch); } } } -static void srp_send_completion(struct ib_cq *cq, void *target_ptr) +static void srp_send_completion(struct ib_cq *cq, void *ch_ptr) { - struct srp_target_port *target = target_ptr; + struct srp_rdma_ch *ch = ch_ptr; struct ib_wc wc; struct srp_iu *iu; while (ib_poll_cq(cq, 1, &wc) > 0) { if (likely(wc.status == IB_WC_SUCCESS)) { iu = (struct srp_iu *) (uintptr_t) wc.wr_id; - list_add(&iu->list, &target->free_tx); + list_add(&iu->list, &ch->free_tx); } else { - srp_handle_qp_err(wc.wr_id, wc.status, true, target); + srp_handle_qp_err(wc.wr_id, wc.status, true, ch); } } } @@ -1852,11 +1983,14 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(shost); struct 
srp_rport *rport = target->rport; + struct srp_rdma_ch *ch; struct srp_request *req; struct srp_iu *iu; struct srp_cmd *cmd; struct ib_device *dev; unsigned long flags; + u32 tag; + u16 idx; int len, ret; const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler; @@ -1873,15 +2007,22 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) if (unlikely(scmnd->result)) goto err; - spin_lock_irqsave(&target->lock, flags); - iu = __srp_get_tx_iu(target, SRP_IU_CMD); - if (!iu) - goto err_unlock; + WARN_ON_ONCE(scmnd->request->tag < 0); + tag = blk_mq_unique_tag(scmnd->request); + ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; + idx = blk_mq_unique_tag_to_tag(tag); + WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", + dev_name(&shost->shost_gendev), tag, idx, + target->req_ring_size); - req = list_first_entry(&target->free_reqs, struct srp_request, list); - list_del(&req->list); - spin_unlock_irqrestore(&target->lock, flags); + spin_lock_irqsave(&ch->lock, flags); + iu = __srp_get_tx_iu(ch, SRP_IU_CMD); + spin_unlock_irqrestore(&ch->lock, flags); + if (!iu) + goto err; + + req = &ch->req_ring[idx]; dev = target->srp_host->srp_dev->dev; ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len, DMA_TO_DEVICE); @@ -1893,13 +2034,13 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) cmd->opcode = SRP_CMD; cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); - cmd->tag = req->index; + cmd->tag = tag; memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); req->scmnd = scmnd; req->cmd = iu; - len = srp_map_data(scmnd, target, req); + len = srp_map_data(scmnd, ch, req); if (len < 0) { shost_printk(KERN_ERR, target->scsi_host, PFX "Failed to map data (%d)\n", len); @@ -1917,7 +2058,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd) ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len, DMA_TO_DEVICE); - if (srp_post_send(target, iu, len)) { + if (srp_post_send(ch, iu, len)) { shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); goto err_unmap; } @@ -1931,10 +2072,10 @@ unlock_rport: return ret; err_unmap: - srp_unmap_data(scmnd, target, req); + srp_unmap_data(scmnd, ch, req); err_iu: - srp_put_tx_iu(target, iu, SRP_IU_CMD); + srp_put_tx_iu(ch, iu, SRP_IU_CMD); /* * Avoid that the loops that iterate over the request ring can @@ -1942,12 +2083,6 @@ err_iu: */ req->scmnd = NULL; - spin_lock_irqsave(&target->lock, flags); - list_add(&req->list, &target->free_reqs); - -err_unlock: - spin_unlock_irqrestore(&target->lock, flags); - err: if (scmnd->result) { scmnd->scsi_done(scmnd); @@ -1961,53 +2096,54 @@ err: /* * Note: the resources allocated in this function are freed in - * srp_free_target_ib(). + * srp_free_ch_ib(). 
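[Editor's note] With the switch to shared block-layer tags, srp_queuecommand() above no longer pops requests off a private free list; it derives both the RDMA channel and the request-ring slot from the block layer's unique tag. A small userspace model of that encoding follows; the 16-bit hardware-queue/tag split is an assumption of this sketch that mirrors blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag(), not a verbatim copy of the kernel helpers.

#include <stdint.h>
#include <stdio.h>

/* Pack a hardware-queue index and a per-queue tag into one 32-bit value. */
static uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << 16) | tag;
}

static uint16_t unique_tag_to_hwq(uint32_t unique) { return unique >> 16; }
static uint16_t unique_tag_to_tag(uint32_t unique) { return unique & 0xffff; }

int main(void)
{
	uint32_t unique = make_unique_tag(3, 42);

	/* The SRP initiator then picks ch = &target->ch[hwq] and
	 * req = &ch->req_ring[tag]. */
	printf("hwq=%u tag=%u\n", unique_tag_to_hwq(unique),
	       unique_tag_to_tag(unique));
	return 0;
}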
*/ -static int srp_alloc_iu_bufs(struct srp_target_port *target) +static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; int i; - target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring), - GFP_KERNEL); - if (!target->rx_ring) + ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), + GFP_KERNEL); + if (!ch->rx_ring) goto err_no_ring; - target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring), - GFP_KERNEL); - if (!target->tx_ring) + ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), + GFP_KERNEL); + if (!ch->tx_ring) goto err_no_ring; for (i = 0; i < target->queue_size; ++i) { - target->rx_ring[i] = srp_alloc_iu(target->srp_host, - target->max_ti_iu_len, - GFP_KERNEL, DMA_FROM_DEVICE); - if (!target->rx_ring[i]) + ch->rx_ring[i] = srp_alloc_iu(target->srp_host, + ch->max_ti_iu_len, + GFP_KERNEL, DMA_FROM_DEVICE); + if (!ch->rx_ring[i]) goto err; } for (i = 0; i < target->queue_size; ++i) { - target->tx_ring[i] = srp_alloc_iu(target->srp_host, - target->max_iu_len, - GFP_KERNEL, DMA_TO_DEVICE); - if (!target->tx_ring[i]) + ch->tx_ring[i] = srp_alloc_iu(target->srp_host, + target->max_iu_len, + GFP_KERNEL, DMA_TO_DEVICE); + if (!ch->tx_ring[i]) goto err; - list_add(&target->tx_ring[i]->list, &target->free_tx); + list_add(&ch->tx_ring[i]->list, &ch->free_tx); } return 0; err: for (i = 0; i < target->queue_size; ++i) { - srp_free_iu(target->srp_host, target->rx_ring[i]); - srp_free_iu(target->srp_host, target->tx_ring[i]); + srp_free_iu(target->srp_host, ch->rx_ring[i]); + srp_free_iu(target->srp_host, ch->tx_ring[i]); } err_no_ring: - kfree(target->tx_ring); - target->tx_ring = NULL; - kfree(target->rx_ring); - target->rx_ring = NULL; + kfree(ch->tx_ring); + ch->tx_ring = NULL; + kfree(ch->rx_ring); + ch->rx_ring = NULL; return -ENOMEM; } @@ -2041,23 +2177,24 @@ static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask) static void srp_cm_rep_handler(struct ib_cm_id *cm_id, struct srp_login_rsp *lrsp, - struct srp_target_port *target) + struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; struct ib_qp_attr *qp_attr = NULL; int attr_mask = 0; int ret; int i; if (lrsp->opcode == SRP_LOGIN_RSP) { - target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); - target->req_lim = be32_to_cpu(lrsp->req_lim_delta); + ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); + ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); /* * Reserve credits for task management so we don't * bounce requests back to the SCSI mid-layer. 
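[Editor's note] The REP handler below clamps the SCSI host's can_queue to the credits granted by the target minus the credits kept back for task management. The arithmetic is simple enough to show standalone; the reserved count here is a stand-in for SRP_TSK_MGMT_SQ_SIZE, whose value is not shown in this hunk.

#include <stdio.h>

#define TSK_MGMT_RESERVED 1	/* stand-in for SRP_TSK_MGMT_SQ_SIZE */

/* Never advertise more commands to the SCSI midlayer than the target
 * granted, minus the credits reserved for task management. */
static int clamp_can_queue(int req_lim, int can_queue)
{
	int limit = req_lim - TSK_MGMT_RESERVED;

	return can_queue < limit ? can_queue : limit;
}

int main(void)
{
	printf("%d\n", clamp_can_queue(64, 128));	/* -> 63 */
	printf("%d\n", clamp_can_queue(256, 128));	/* -> 128 */
	return 0;
}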
*/ target->scsi_host->can_queue - = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, + = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, target->scsi_host->can_queue); target->scsi_host->cmd_per_lun = min_t(int, target->scsi_host->can_queue, @@ -2069,8 +2206,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, goto error; } - if (!target->rx_ring) { - ret = srp_alloc_iu_bufs(target); + if (!ch->rx_ring) { + ret = srp_alloc_iu_bufs(ch); if (ret) goto error; } @@ -2085,13 +2222,14 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, if (ret) goto error_free; - ret = ib_modify_qp(target->qp, qp_attr, attr_mask); + ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); if (ret) goto error_free; for (i = 0; i < target->queue_size; i++) { - struct srp_iu *iu = target->rx_ring[i]; - ret = srp_post_recv(target, iu); + struct srp_iu *iu = ch->rx_ring[i]; + + ret = srp_post_recv(ch, iu); if (ret) goto error_free; } @@ -2103,7 +2241,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id, target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); - ret = ib_modify_qp(target->qp, qp_attr, attr_mask); + ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); if (ret) goto error_free; @@ -2113,13 +2251,14 @@ error_free: kfree(qp_attr); error: - target->status = ret; + ch->status = ret; } static void srp_cm_rej_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event, - struct srp_target_port *target) + struct srp_rdma_ch *ch) { + struct srp_target_port *target = ch->target; struct Scsi_Host *shost = target->scsi_host; struct ib_class_port_info *cpi; int opcode; @@ -2127,12 +2266,12 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id, switch (event->param.rej_rcvd.reason) { case IB_CM_REJ_PORT_CM_REDIRECT: cpi = event->param.rej_rcvd.ari; - target->path.dlid = cpi->redirect_lid; - target->path.pkey = cpi->redirect_pkey; + ch->path.dlid = cpi->redirect_lid; + ch->path.pkey = cpi->redirect_pkey; cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; - memcpy(target->path.dgid.raw, cpi->redirect_gid, 16); + memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16); - target->status = target->path.dlid ? + ch->status = ch->path.dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; break; @@ -2143,26 +2282,26 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id, * reject reason code 25 when they mean 24 * (port redirect). 
*/ - memcpy(target->path.dgid.raw, + memcpy(ch->path.dgid.raw, event->param.rej_rcvd.ari, 16); shost_printk(KERN_DEBUG, shost, PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n", - (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix), - (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id)); + be64_to_cpu(ch->path.dgid.global.subnet_prefix), + be64_to_cpu(ch->path.dgid.global.interface_id)); - target->status = SRP_PORT_REDIRECT; + ch->status = SRP_PORT_REDIRECT; } else { shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_PORT_REDIRECT\n"); - target->status = -ECONNRESET; + ch->status = -ECONNRESET; } break; case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID: shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n"); - target->status = -ECONNRESET; + ch->status = -ECONNRESET; break; case IB_CM_REJ_CONSUMER_DEFINED: @@ -2177,30 +2316,31 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id, else shost_printk(KERN_WARNING, shost, PFX "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", - target->path.sgid.raw, - target->orig_dgid, reason); + target->sgid.raw, + target->orig_dgid.raw, reason); } else shost_printk(KERN_WARNING, shost, " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," " opcode 0x%02x\n", opcode); - target->status = -ECONNRESET; + ch->status = -ECONNRESET; break; case IB_CM_REJ_STALE_CONN: shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n"); - target->status = SRP_STALE_CONN; + ch->status = SRP_STALE_CONN; break; default: shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n", event->param.rej_rcvd.reason); - target->status = -ECONNRESET; + ch->status = -ECONNRESET; } } static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) { - struct srp_target_port *target = cm_id->context; + struct srp_rdma_ch *ch = cm_id->context; + struct srp_target_port *target = ch->target; int comp = 0; switch (event->event) { @@ -2208,19 +2348,19 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) shost_printk(KERN_DEBUG, target->scsi_host, PFX "Sending CM REQ failed\n"); comp = 1; - target->status = -ECONNRESET; + ch->status = -ECONNRESET; break; case IB_CM_REP_RECEIVED: comp = 1; - srp_cm_rep_handler(cm_id, event->private_data, target); + srp_cm_rep_handler(cm_id, event->private_data, ch); break; case IB_CM_REJ_RECEIVED: shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); comp = 1; - srp_cm_rej_handler(cm_id, event, target); + srp_cm_rej_handler(cm_id, event, ch); break; case IB_CM_DREQ_RECEIVED: @@ -2238,7 +2378,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) PFX "connection closed\n"); comp = 1; - target->status = 0; + ch->status = 0; break; case IB_CM_MRA_RECEIVED: @@ -2253,65 +2393,30 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) } if (comp) - complete(&target->done); + complete(&ch->done); return 0; } /** - * srp_change_queue_type - changing device queue tag type - * @sdev: scsi device struct - * @tag_type: requested tag type - * - * Returns queue tag type. 
- */ -static int -srp_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; -} - -/** * srp_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth - * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP - * (see include/scsi/scsi_host.h for definition) * * Returns queue depth. */ static int -srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) +srp_change_queue_depth(struct scsi_device *sdev, int qdepth) { - struct Scsi_Host *shost = sdev->host; - int max_depth; - if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { - max_depth = shost->can_queue; - if (!sdev->tagged_supported) - max_depth = 1; - if (qdepth > max_depth) - qdepth = max_depth; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - } else if (reason == SCSI_QDEPTH_QFULL) - scsi_track_queue_full(sdev, qdepth); - else - return -EOPNOTSUPP; - - return sdev->queue_depth; + if (!sdev->tagged_supported) + qdepth = 1; + return scsi_change_queue_depth(sdev, qdepth); } -static int srp_send_tsk_mgmt(struct srp_target_port *target, - u64 req_tag, unsigned int lun, u8 func) +static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, + unsigned int lun, u8 func) { + struct srp_target_port *target = ch->target; struct srp_rport *rport = target->rport; struct ib_device *dev = target->srp_host->srp_dev->dev; struct srp_iu *iu; @@ -2320,16 +2425,16 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, if (!target->connected || target->qp_in_error) return -1; - init_completion(&target->tsk_mgmt_done); + init_completion(&ch->tsk_mgmt_done); /* - * Lock the rport mutex to avoid that srp_create_target_ib() is + * Lock the rport mutex to avoid that srp_create_ch_ib() is * invoked while a task management function is being sent. 
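[Editor's note] After this conversion the ->change_queue_depth() callback loses its reason argument and reduces to a single clamp plus a call into the midlayer helper. The sketch below models that behaviour in userspace; struct fake_sdev and the plain assignment standing in for scsi_change_queue_depth() are illustrative only.

#include <stdio.h>

struct fake_sdev {
	int tagged_supported;
	int queue_depth;
};

/* Untagged devices are forced to a depth of one; everything else is
 * passed straight to the (modelled) midlayer helper. */
static int change_queue_depth(struct fake_sdev *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	sdev->queue_depth = qdepth;	/* scsi_change_queue_depth() stand-in */
	return sdev->queue_depth;
}

int main(void)
{
	struct fake_sdev tagged = { .tagged_supported = 1 };
	struct fake_sdev untagged = { .tagged_supported = 0 };

	printf("%d %d\n", change_queue_depth(&tagged, 32),
	       change_queue_depth(&untagged, 32));	/* 32 1 */
	return 0;
}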
*/ mutex_lock(&rport->mutex); - spin_lock_irq(&target->lock); - iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); - spin_unlock_irq(&target->lock); + spin_lock_irq(&ch->lock); + iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT); + spin_unlock_irq(&ch->lock); if (!iu) { mutex_unlock(&rport->mutex); @@ -2350,15 +2455,15 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, DMA_TO_DEVICE); - if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { - srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); + if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) { + srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT); mutex_unlock(&rport->mutex); return -1; } mutex_unlock(&rport->mutex); - if (!wait_for_completion_timeout(&target->tsk_mgmt_done, + if (!wait_for_completion_timeout(&ch->tsk_mgmt_done, msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) return -1; @@ -2369,20 +2474,32 @@ static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = (struct srp_request *) scmnd->host_scribble; + u32 tag; + u16 ch_idx; + struct srp_rdma_ch *ch; int ret; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); - if (!req || !srp_claim_req(target, req, NULL, scmnd)) + if (!req) + return SUCCESS; + tag = blk_mq_unique_tag(scmnd->request); + ch_idx = blk_mq_unique_tag_to_hwq(tag); + if (WARN_ON_ONCE(ch_idx >= target->ch_count)) return SUCCESS; - if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, + ch = &target->ch[ch_idx]; + if (!srp_claim_req(ch, req, NULL, scmnd)) + return SUCCESS; + shost_printk(KERN_ERR, target->scsi_host, + "Sending SRP abort for tag %#x\n", tag); + if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, SRP_TSK_ABORT_TASK) == 0) ret = SUCCESS; else if (target->rport->state == SRP_RPORT_LOST) ret = FAST_IO_FAIL; else ret = FAILED; - srp_free_req(target, req, scmnd, 0); + srp_free_req(ch, req, scmnd, 0); scmnd->result = DID_ABORT << 16; scmnd->scsi_done(scmnd); @@ -2392,19 +2509,25 @@ static int srp_abort(struct scsi_cmnd *scmnd) static int srp_reset_device(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_rdma_ch *ch; int i; shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); - if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, + ch = &target->ch[0]; + if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, SRP_TSK_LUN_RESET)) return FAILED; - if (target->tsk_mgmt_status) + if (ch->tsk_mgmt_status) return FAILED; - for (i = 0; i < target->req_ring_size; ++i) { - struct srp_request *req = &target->req_ring[i]; - srp_finish_req(target, req, scmnd->device, DID_RESET << 16); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + for (i = 0; i < target->req_ring_size; ++i) { + struct srp_request *req = &ch->req_ring[i]; + + srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); + } } return SUCCESS; @@ -2466,7 +2589,7 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey)); + return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey)); } static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, @@ -2474,15 +2597,16 @@ static ssize_t show_sgid(struct device *dev, struct device_attribute *attr, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "%pI6\n", 
target->path.sgid.raw); + return sprintf(buf, "%pI6\n", target->sgid.raw); } static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); + struct srp_rdma_ch *ch = &target->ch[0]; - return sprintf(buf, "%pI6\n", target->path.dgid.raw); + return sprintf(buf, "%pI6\n", ch->path.dgid.raw); } static ssize_t show_orig_dgid(struct device *dev, @@ -2490,15 +2614,21 @@ static ssize_t show_orig_dgid(struct device *dev, { struct srp_target_port *target = host_to_target(class_to_shost(dev)); - return sprintf(buf, "%pI6\n", target->orig_dgid); + return sprintf(buf, "%pI6\n", target->orig_dgid.raw); } static ssize_t show_req_lim(struct device *dev, struct device_attribute *attr, char *buf) { struct srp_target_port *target = host_to_target(class_to_shost(dev)); + struct srp_rdma_ch *ch; + int i, req_lim = INT_MAX; - return sprintf(buf, "%d\n", target->req_lim); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + req_lim = min(req_lim, ch->req_lim); + } + return sprintf(buf, "%d\n", req_lim); } static ssize_t show_zero_req_lim(struct device *dev, @@ -2525,6 +2655,14 @@ static ssize_t show_local_ib_device(struct device *dev, return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name); } +static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(dev)); + + return sprintf(buf, "%d\n", target->ch_count); +} + static ssize_t show_comp_vector(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2568,6 +2706,7 @@ static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); +static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL); static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL); static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL); static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL); @@ -2585,6 +2724,7 @@ static struct device_attribute *srp_host_attrs[] = { &dev_attr_zero_req_lim, &dev_attr_local_ib_port, &dev_attr_local_ib_device, + &dev_attr_ch_count, &dev_attr_comp_vector, &dev_attr_tl_retry_count, &dev_attr_cmd_sg_entries, @@ -2600,7 +2740,7 @@ static struct scsi_host_template srp_template = { .info = srp_target_info, .queuecommand = srp_queuecommand, .change_queue_depth = srp_change_queue_depth, - .change_queue_type = srp_change_queue_type, + .change_queue_type = scsi_change_queue_type, .eh_abort_handler = srp_abort, .eh_device_reset_handler = srp_reset_device, .eh_host_reset_handler = srp_reset_host, @@ -2610,14 +2750,28 @@ static struct scsi_host_template srp_template = { .this_id = -1, .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, .use_clustering = ENABLE_CLUSTERING, - .shost_attrs = srp_host_attrs + .shost_attrs = srp_host_attrs, + .use_blk_tags = 1, + .track_queue_depth = 1, }; +static int srp_sdev_count(struct Scsi_Host *host) +{ + struct scsi_device *sdev; + int c = 0; + + shost_for_each_device(sdev, host) + c++; + + return c; +} + static int srp_add_target(struct srp_host *host, struct srp_target_port *target) { struct srp_rport_identifiers ids; struct srp_rport *rport; + target->state = SRP_TARGET_SCANNING; sprintf(target->target_name, "SRP.T10:%016llX", (unsigned long long) be64_to_cpu(target->id_ext)); @@ 
-2640,11 +2794,26 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) list_add_tail(&target->list, &host->target_list); spin_unlock(&host->target_lock); - target->state = SRP_TARGET_LIVE; - scsi_scan_target(&target->scsi_host->shost_gendev, 0, target->scsi_id, SCAN_WILD_CARD, 0); + if (!target->connected || target->qp_in_error) { + shost_printk(KERN_INFO, target->scsi_host, + PFX "SCSI scan failed - removing SCSI host\n"); + srp_queue_remove_work(target); + goto out; + } + + pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n", + dev_name(&target->scsi_host->shost_gendev), + srp_sdev_count(target->scsi_host)); + + spin_lock_irq(&target->lock); + if (target->state == SRP_TARGET_SCANNING) + target->state = SRP_TARGET_LIVE; + spin_unlock_irq(&target->lock); + +out: return 0; } @@ -2801,11 +2970,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) } for (i = 0; i < 16; ++i) { - strlcpy(dgid, p + i * 2, 3); - target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); + strlcpy(dgid, p + i * 2, sizeof(dgid)); + if (sscanf(dgid, "%hhx", + &target->orig_dgid.raw[i]) < 1) { + ret = -EINVAL; + kfree(p); + goto out; + } } kfree(p); - memcpy(target->orig_dgid, target->path.dgid.raw, 16); break; case SRP_OPT_PKEY: @@ -2813,7 +2986,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) pr_warn("bad P_Key parameter '%s'\n", p); goto out; } - target->path.pkey = cpu_to_be16(token); + target->pkey = cpu_to_be16(token); break; case SRP_OPT_SERVICE_ID: @@ -2823,7 +2996,6 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) goto out; } target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); - target->path.service_id = target->service_id; kfree(p); break; @@ -2960,9 +3132,11 @@ static ssize_t srp_create_target(struct device *dev, container_of(dev, struct srp_host, dev); struct Scsi_Host *target_host; struct srp_target_port *target; + struct srp_rdma_ch *ch; struct srp_device *srp_dev = host->srp_dev; struct ib_device *ibdev = srp_dev->dev; - int ret; + int ret, node_idx, node, cpu, i; + bool multich = false; target_host = scsi_host_alloc(&srp_template, sizeof (struct srp_target_port)); @@ -2988,12 +3162,22 @@ static ssize_t srp_create_target(struct device *dev, target->tl_retry_count = 7; target->queue_size = SRP_DEFAULT_QUEUE_SIZE; + /* + * Avoid that the SCSI host can be removed by srp_remove_target() + * before this function returns. + */ + scsi_host_get(target->scsi_host); + mutex_lock(&host->add_target_mutex); ret = srp_parse_options(buf, target); if (ret) goto err; + ret = scsi_init_shared_tag_map(target_host, target_host->can_queue); + if (ret) + goto err; + target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; if (!srp_conn_unique(target->srp_host, target)) { @@ -3022,59 +3206,115 @@ static ssize_t srp_create_target(struct device *dev, INIT_WORK(&target->tl_err_work, srp_tl_err_work); INIT_WORK(&target->remove_work, srp_remove_work); spin_lock_init(&target->lock); - INIT_LIST_HEAD(&target->free_tx); - ret = srp_alloc_req_data(target); + ret = ib_query_gid(ibdev, host->port, 0, &target->sgid); if (ret) - goto err_free_mem; + goto err; - ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid); - if (ret) - goto err_free_mem; + ret = -ENOMEM; + target->ch_count = max_t(unsigned, num_online_nodes(), + min(ch_count ? 
: + min(4 * num_online_nodes(), + ibdev->num_comp_vectors), + num_online_cpus())); + target->ch = kcalloc(target->ch_count, sizeof(*target->ch), + GFP_KERNEL); + if (!target->ch) + goto err; - ret = srp_create_target_ib(target); - if (ret) - goto err_free_mem; + node_idx = 0; + for_each_online_node(node) { + const int ch_start = (node_idx * target->ch_count / + num_online_nodes()); + const int ch_end = ((node_idx + 1) * target->ch_count / + num_online_nodes()); + const int cv_start = (node_idx * ibdev->num_comp_vectors / + num_online_nodes() + target->comp_vector) + % ibdev->num_comp_vectors; + const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors / + num_online_nodes() + target->comp_vector) + % ibdev->num_comp_vectors; + int cpu_idx = 0; + + for_each_online_cpu(cpu) { + if (cpu_to_node(cpu) != node) + continue; + if (ch_start + cpu_idx >= ch_end) + continue; + ch = &target->ch[ch_start + cpu_idx]; + ch->target = target; + ch->comp_vector = cv_start == cv_end ? cv_start : + cv_start + cpu_idx % (cv_end - cv_start); + spin_lock_init(&ch->lock); + INIT_LIST_HEAD(&ch->free_tx); + ret = srp_new_cm_id(ch); + if (ret) + goto err_disconnect; - ret = srp_new_cm_id(target); - if (ret) - goto err_free_ib; + ret = srp_create_ch_ib(ch); + if (ret) + goto err_disconnect; - ret = srp_connect_target(target); - if (ret) { - shost_printk(KERN_ERR, target->scsi_host, - PFX "Connection failed\n"); - goto err_cm_id; + ret = srp_alloc_req_data(ch); + if (ret) + goto err_disconnect; + + ret = srp_connect_ch(ch, multich); + if (ret) { + shost_printk(KERN_ERR, target->scsi_host, + PFX "Connection %d/%d failed\n", + ch_start + cpu_idx, + target->ch_count); + if (node_idx == 0 && cpu_idx == 0) { + goto err_disconnect; + } else { + srp_free_ch_ib(target, ch); + srp_free_req_data(target, ch); + target->ch_count = ch - target->ch; + break; + } + } + + multich = true; + cpu_idx++; + } + node_idx++; } + target->scsi_host->nr_hw_queues = target->ch_count; + ret = srp_add_target(host, target); if (ret) goto err_disconnect; - shost_printk(KERN_DEBUG, target->scsi_host, PFX - "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", - be64_to_cpu(target->id_ext), - be64_to_cpu(target->ioc_guid), - be16_to_cpu(target->path.pkey), - be64_to_cpu(target->service_id), - target->path.sgid.raw, target->path.dgid.raw); + if (target->state != SRP_TARGET_REMOVED) { + shost_printk(KERN_DEBUG, target->scsi_host, PFX + "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", + be64_to_cpu(target->id_ext), + be64_to_cpu(target->ioc_guid), + be16_to_cpu(target->pkey), + be64_to_cpu(target->service_id), + target->sgid.raw, target->orig_dgid.raw); + } ret = count; out: mutex_unlock(&host->add_target_mutex); + + scsi_host_put(target->scsi_host); + return ret; err_disconnect: srp_disconnect_target(target); -err_cm_id: - ib_destroy_cm_id(target->cm_id); - -err_free_ib: - srp_free_target_ib(target); + for (i = 0; i < target->ch_count; i++) { + ch = &target->ch[i]; + srp_free_ch_ib(target, ch); + srp_free_req_data(target, ch); + } -err_free_mem: - srp_free_req_data(target); + kfree(target->ch); err: scsi_host_put(target_host); diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index e46ecb15aa0d..a611556406ac 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -70,9 +70,12 @@ enum { LOCAL_INV_WR_ID_MASK = 1, FAST_REG_WR_ID_MASK = 2, + + SRP_LAST_WR_ID = 0xfffffffcU, }; enum 
srp_target_state { + SRP_TARGET_SCANNING, SRP_TARGET_LIVE, SRP_TARGET_REMOVED, }; @@ -115,7 +118,6 @@ struct srp_host { }; struct srp_request { - struct list_head list; struct scsi_cmnd *scmnd; struct srp_iu *cmd; union { @@ -126,24 +128,62 @@ struct srp_request { struct srp_direct_buf *indirect_desc; dma_addr_t indirect_dma_addr; short nmdesc; - short index; }; -struct srp_target_port { +/** + * struct srp_rdma_ch + * @comp_vector: Completion vector used by this RDMA channel. + */ +struct srp_rdma_ch { /* These are RW in the hot path, and commonly used together */ struct list_head free_tx; - struct list_head free_reqs; spinlock_t lock; s32 req_lim; /* These are read-only in the hot path */ - struct ib_cq *send_cq ____cacheline_aligned_in_smp; + struct srp_target_port *target ____cacheline_aligned_in_smp; + struct ib_cq *send_cq; struct ib_cq *recv_cq; struct ib_qp *qp; union { struct ib_fmr_pool *fmr_pool; struct srp_fr_pool *fr_pool; }; + + /* Everything above this point is used in the hot path of + * command processing. Try to keep them packed into cachelines. + */ + + struct completion done; + int status; + + struct ib_sa_path_rec path; + struct ib_sa_query *path_query; + int path_query_id; + + struct ib_cm_id *cm_id; + struct srp_iu **tx_ring; + struct srp_iu **rx_ring; + struct srp_request *req_ring; + int max_ti_iu_len; + int comp_vector; + + struct completion tsk_mgmt_done; + u8 tsk_mgmt_status; +}; + +/** + * struct srp_target_port + * @comp_vector: Completion vector used by the first RDMA channel created for + * this target port. + */ +struct srp_target_port { + /* read and written in the hot path */ + spinlock_t lock; + + /* read only in the hot path */ + struct srp_rdma_ch *ch; + u32 ch_count; u32 lkey; u32 rkey; enum srp_target_state state; @@ -152,10 +192,8 @@ struct srp_target_port { unsigned int indirect_size; bool allow_ext_sg; - /* Everything above this point is used in the hot path of - * command processing. Try to keep them packed into cachelines. 
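[Editor's note] The per-channel state declared in struct srp_rdma_ch above is filled in by the channel-allocation loop in srp_create_target(), which divides channels over NUMA nodes and assigns each node's channels to a slice of the device's completion vectors. The arithmetic is easier to see in isolation; the node, channel, and vector counts below are invented, and the driver additionally rotates the slice by the user-supplied comp_vector and wraps it modulo the vector count, which this sketch omits.

#include <stdio.h>

int main(void)
{
	const int nodes = 2, ch_count = 8, comp_vectors = 4;

	for (int node = 0; node < nodes; node++) {
		int ch_start = node * ch_count / nodes;
		int ch_end = (node + 1) * ch_count / nodes;
		int cv_start = node * comp_vectors / nodes;
		int cv_end = (node + 1) * comp_vectors / nodes;

		for (int i = 0; ch_start + i < ch_end; i++) {
			int cv = cv_start == cv_end ? cv_start :
				 cv_start + i % (cv_end - cv_start);

			printf("node %d: ch[%d] -> comp_vector %d\n",
			       node, ch_start + i, cv);
		}
	}
	return 0;
}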
- */ - + /* other member variables */ + union ib_gid sgid; __be64 id_ext; __be64 ioc_guid; __be64 service_id; @@ -172,34 +210,19 @@ struct srp_target_port { int comp_vector; int tl_retry_count; - struct ib_sa_path_rec path; - __be16 orig_dgid[8]; - struct ib_sa_query *path_query; - int path_query_id; + union ib_gid orig_dgid; + __be16 pkey; u32 rq_tmo_jiffies; bool connected; - struct ib_cm_id *cm_id; - - int max_ti_iu_len; - int zero_req_lim; - struct srp_iu **tx_ring; - struct srp_iu **rx_ring; - struct srp_request *req_ring; - struct work_struct tl_err_work; struct work_struct remove_work; struct list_head list; - struct completion done; - int status; bool qp_in_error; - - struct completion tsk_mgmt_done; - u8 tsk_mgmt_status; }; struct srp_iu { diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 0707fa2c701b..5bdaae15a742 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = { .cmd_per_lun = 7, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mptscsih_host_attrs, + .use_blk_tags = 1, }; static int mptsas_get_linkerrors(struct sas_phy *phy) diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index e7dcb2583369..6c9fc11efb87 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -2311,26 +2311,21 @@ mptscsih_slave_destroy(struct scsi_device *sdev) * mptscsih_change_queue_depth - This function will set a devices queue depth * @sdev: per scsi_device pointer * @qdepth: requested queue depth - * @reason: calling context * * Adding support for new 'change_queue_depth' api. */ int -mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) +mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth) { MPT_SCSI_HOST *hd = shost_priv(sdev->host); VirtTarget *vtarget; struct scsi_target *starget; int max_depth; - int tagged; MPT_ADAPTER *ioc = hd->ioc; starget = scsi_target(sdev); vtarget = starget->hostdata; - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (ioc->bus_type == SPI) { if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) max_depth = 1; @@ -2347,13 +2342,8 @@ mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) if (qdepth > max_depth) qdepth = max_depth; - if (qdepth == 1) - tagged = 0; - else - tagged = MSG_SIMPLE_TAG; - scsi_adjust_queue_depth(sdev, tagged, qdepth); - return sdev->queue_depth; + return scsi_change_queue_depth(sdev, qdepth); } /* @@ -2397,12 +2387,10 @@ mptscsih_slave_configure(struct scsi_device *sdev) ioc->name, vtarget->negoFlags, vtarget->maxOffset, vtarget->minSyncFactor)); - mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH, - SCSI_QDEPTH_DEFAULT); + mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH); dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "tagged %d, simple %d, ordered %d\n", - ioc->name,sdev->tagged_supported, sdev->simple_tags, - sdev->ordered_tags)); + "tagged %d, simple %d\n", + ioc->name,sdev->tagged_supported, sdev->simple_tags)); blk_queue_dma_alignment (sdev->request_queue, 512 - 1); diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index e1b1a198a62a..2baeefd9be7a 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -128,8 +128,7 @@ extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_F extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, 
MPT_FRAME_HDR *r); extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); -extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason); +extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth); extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id); extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); extern struct device_attribute *mptscsih_host_attrs[]; diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c index 0ff4b02177be..0cf2c9d676be 100644 --- a/drivers/misc/eeprom/eeprom_93cx6.c +++ b/drivers/misc/eeprom/eeprom_93cx6.c @@ -170,7 +170,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom, } /** - * eeprom_93cx6_read - Read multiple words from eeprom + * eeprom_93cx6_read - Read a word from eeprom * @eeprom: Pointer to eeprom structure * @word: Word index from where we should start reading * @data: target pointer where the information will have to be stored @@ -235,6 +235,66 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread); /** + * eeprom_93cx6_readb - Read a byte from eeprom + * @eeprom: Pointer to eeprom structure + * @word: Byte index from where we should start reading + * @data: target pointer where the information will have to be stored + * + * This function will read a byte of the eeprom data + * into the given data pointer. + */ +void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte, + u8 *data) +{ + u16 command; + u16 tmp; + + /* + * Initialize the eeprom register + */ + eeprom_93cx6_startup(eeprom); + + /* + * Select the read opcode and the byte to be read. + */ + command = (PCI_EEPROM_READ_OPCODE << (eeprom->width + 1)) | byte; + eeprom_93cx6_write_bits(eeprom, command, + PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1); + + /* + * Read the requested 8 bits. + */ + eeprom_93cx6_read_bits(eeprom, &tmp, 8); + *data = tmp & 0xff; + + /* + * Cleanup eeprom register. + */ + eeprom_93cx6_cleanup(eeprom); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_readb); + +/** + * eeprom_93cx6_multireadb - Read multiple bytes from eeprom + * @eeprom: Pointer to eeprom structure + * @byte: Index from where we should start reading + * @data: target pointer where the information will have to be stored + * @words: Number of bytes that should be read. + * + * This function will read all requested bytes from the eeprom, + * this is done by calling eeprom_93cx6_readb() multiple times. 
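[Editor's note] As the kernel-doc above says, the new eeprom_93cx6_multireadb() is just a loop over single-byte reads. A userspace model of that relationship, with a fake EEPROM array standing in for the bit-banged hardware access and a made-up MAC-address layout, looks like this:

#include <stdint.h>
#include <stdio.h>

static const uint8_t fake_eeprom[16] = {
	0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc, 0x00, 0x00,
};

/* Model of eeprom_93cx6_readb(): fetch one byte at a byte-wise offset. */
static void readb_model(uint8_t offset, uint8_t *data)
{
	*data = fake_eeprom[offset];
}

/* Model of eeprom_93cx6_multireadb(): repeat the single-byte read. */
static void multireadb_model(uint8_t offset, uint8_t *data, uint16_t bytes)
{
	for (uint16_t i = 0; i < bytes; i++)
		readb_model(offset + i, &data[i]);
}

int main(void)
{
	uint8_t mac[6];

	multireadb_model(0, mac, sizeof(mac));	/* e.g. a byte-addressed MAC */
	for (int i = 0; i < 6; i++)
		printf("%02x%s", mac[i], i < 5 ? ":" : "\n");
	return 0;
}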
+ */ +void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, const u8 byte, + u8 *data, const u16 bytes) +{ + unsigned int i; + + for (i = 0; i < bytes; i++) + eeprom_93cx6_readb(eeprom, byte + i, &data[i]); +} +EXPORT_SYMBOL_GPL(eeprom_93cx6_multireadb); + +/** * eeprom_93cx6_wren - set the write enable state * @eeprom: Pointer to eeprom structure * @enable: true to enable writes, otherwise disable writes diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 2cf2bbc0b927..180a5442fd4b 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -187,6 +187,7 @@ void enclosure_unregister(struct enclosure_device *edev) EXPORT_SYMBOL_GPL(enclosure_unregister); #define ENCLOSURE_NAME_SIZE 64 +#define COMPONENT_NAME_SIZE 64 static void enclosure_link_name(struct enclosure_component *cdev, char *name) { @@ -246,6 +247,29 @@ static void enclosure_component_release(struct device *dev) put_device(dev->parent); } +static struct enclosure_component * +enclosure_component_find_by_name(struct enclosure_device *edev, + const char *name) +{ + int i; + const char *cname; + struct enclosure_component *ecomp; + + if (!edev || !name || !name[0]) + return NULL; + + for (i = 0; i < edev->components; i++) { + ecomp = &edev->component[i]; + cname = dev_name(&ecomp->cdev); + if (ecomp->number != -1 && + cname && cname[0] && + !strcmp(cname, name)) + return ecomp; + } + + return NULL; +} + static const struct attribute_group *enclosure_component_groups[]; /** @@ -269,7 +293,8 @@ enclosure_component_register(struct enclosure_device *edev, { struct enclosure_component *ecomp; struct device *cdev; - int err; + int err, i; + char newname[COMPONENT_NAME_SIZE]; if (number >= edev->components) return ERR_PTR(-EINVAL); @@ -283,9 +308,20 @@ enclosure_component_register(struct enclosure_device *edev, ecomp->number = number; cdev = &ecomp->cdev; cdev->parent = get_device(&edev->edev); - if (name && name[0]) - dev_set_name(cdev, "%s", name); - else + + if (name && name[0]) { + /* Some hardware (e.g. enclosure in RX300 S6) has components + * with non unique names. Registering duplicates in sysfs + * will lead to warnings during bootup. So make the names + * unique by appending consecutive numbers -1, -2, ... 
*/ + i = 1; + snprintf(newname, COMPONENT_NAME_SIZE, + "%s", name); + while (enclosure_component_find_by_name(edev, newname)) + snprintf(newname, COMPONENT_NAME_SIZE, + "%s-%i", name, i++); + dev_set_name(cdev, "%s", newname); + } else dev_set_name(cdev, "%u", number); cdev->release = enclosure_component_release; diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 8004b071a9f2..01a73395a017 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -353,9 +353,11 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->ccw_device = ccw_device; INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); - INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); + INIT_DELAYED_WORK(&adapter->scan_work, zfcp_fc_scan_ports); INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update); + adapter->next_port_scan = jiffies; + if (zfcp_qdio_setup(adapter)) goto failed; @@ -420,7 +422,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter) { struct ccw_device *cdev = adapter->ccw_device; - cancel_work_sync(&adapter->scan_work); + cancel_delayed_work_sync(&adapter->scan_work); cancel_work_sync(&adapter->stat_work); cancel_work_sync(&adapter->ns_up_work); zfcp_destroy_adapter_work_queue(adapter); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index f9879d400d0e..54c7b48fdb46 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -56,8 +56,22 @@ static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag) zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, tag); + + /* + * We want to scan ports here, with some random backoff and without + * rate limit. Recovery has already scheduled a port scan for us, + * but with both random delay and rate limit. Nevertheless we get + * what we want here by flushing the scheduled work after sleeping + * an equivalent random time. + * Let the port scan random delay elapse first. If recovery finishes + * up to that point in time, that would be perfect for both recovery + * and port scan. If not, i.e. recovery takes ages, there was no + * point in waiting a random delay on top of the time consumed by + * recovery. + */ + msleep(zfcp_fc_port_scan_backoff()); zfcp_erp_wait(adapter); - flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ + flush_delayed_work(&adapter->scan_work); zfcp_ccw_adapter_put(adapter); @@ -162,11 +176,19 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev) adapter->req_no = 0; zfcp_ccw_activate(cdev, 0, "ccsonl1"); - /* scan for remote ports - either at the end of any successful adapter recovery - or only after the adapter recovery for setting a device online */ + + /* + * We want to scan ports here, always, with some random delay and + * without rate limit - basically what zfcp_ccw_activate() has + * achieved for us. Not quite! That port scan depended on + * !no_auto_port_rescan. So let's cover the no_auto_port_rescan + * case here to make sure a port scan is done unconditionally. + * Since zfcp_ccw_activate() has waited the desired random time, + * we can immediately schedule and flush a port scan for the + * remaining cases. 
+ */ zfcp_fc_inverse_conditional_port_scan(adapter); - flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ + flush_delayed_work(&adapter->scan_work); zfcp_ccw_adapter_put(adapter); return 0; } diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index d91173f326c5..b8e853e53546 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -186,12 +186,13 @@ struct zfcp_adapter { struct fc_host_statistics *fc_stats; struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; - struct work_struct scan_work; + struct delayed_work scan_work; struct work_struct ns_up_work; struct service_level service_level; struct workqueue_struct *work_queue; struct device_dma_parameters dma_parms; struct zfcp_fc_events events; + unsigned long next_port_scan; }; struct zfcp_port { diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c82fe65c4128..2c5d4567d1da 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -821,11 +821,6 @@ static int zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *act) return ZFCP_ERP_CONTINUES; } -static void zfcp_erp_port_strategy_clearstati(struct zfcp_port *port) -{ - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, &port->status); -} - static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) { struct zfcp_port *port = erp_action->port; @@ -833,7 +828,6 @@ static int zfcp_erp_port_forced_strategy(struct zfcp_erp_action *erp_action) switch (erp_action->step) { case ZFCP_ERP_STEP_UNINITIALIZED: - zfcp_erp_port_strategy_clearstati(port); if ((status & ZFCP_STATUS_PORT_PHYS_OPEN) && (status & ZFCP_STATUS_COMMON_OPEN)) return zfcp_erp_port_forced_strategy_close(erp_action); @@ -933,7 +927,6 @@ static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) switch (erp_action->step) { case ZFCP_ERP_STEP_UNINITIALIZED: - zfcp_erp_port_strategy_clearstati(port); if (p_status & ZFCP_STATUS_COMMON_OPEN) return zfcp_erp_port_strategy_close(erp_action); break; diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index a9c570a09b85..5b500652572b 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -85,6 +85,7 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); extern void zfcp_fc_sym_name_update(struct work_struct *); +extern unsigned int zfcp_fc_port_scan_backoff(void); extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *); extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index ca28e1c66115..25d49f32ca63 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -12,6 +12,7 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/utsname.h> +#include <linux/random.h> #include <scsi/fc/fc_els.h> #include <scsi/libfc.h> #include "zfcp_ext.h" @@ -31,12 +32,54 @@ module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600); MODULE_PARM_DESC(no_auto_port_rescan, "no automatic port_rescan (default off)"); +static unsigned int port_scan_backoff = 500; +module_param(port_scan_backoff, uint, 0600); +MODULE_PARM_DESC(port_scan_backoff, + "upper limit of port scan random backoff in msecs (default 500)"); + +static unsigned int port_scan_ratelimit = 60000; +module_param(port_scan_ratelimit, uint, 0600); 
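With the defaults set just above (port_scan_backoff = 500 ms, port_scan_ratelimit = 60000 ms), the helpers added below work out roughly as follows (illustrative numbers, assuming the previous scan started at t = 0): zfcp_fc_port_scan_time() arms next_port_scan at about t = 60 s plus a random 0 to 0.5 s; a conditional rescan requested at t = 10 s is therefore queued with a delay of roughly 50 s, while one requested after t = 60.5 s runs immediately. User-triggered scans via the port_rescan sysfs attribute (further down) are queued with delay 0, so they are never rate limited.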
+MODULE_PARM_DESC(port_scan_ratelimit, + "minimum interval between port scans in msecs (default 60000)"); + +unsigned int zfcp_fc_port_scan_backoff(void) +{ + if (!port_scan_backoff) + return 0; + return get_random_int() % port_scan_backoff; +} + +static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter) +{ + unsigned long interval = msecs_to_jiffies(port_scan_ratelimit); + unsigned long backoff = msecs_to_jiffies(zfcp_fc_port_scan_backoff()); + + adapter->next_port_scan = jiffies + interval + backoff; +} + +static void zfcp_fc_port_scan(struct zfcp_adapter *adapter) +{ + unsigned long now = jiffies; + unsigned long next = adapter->next_port_scan; + unsigned long delay = 0, max; + + /* delay only needed within waiting period */ + if (time_before(now, next)) { + delay = next - now; + /* paranoia: never ever delay scans longer than specified */ + max = msecs_to_jiffies(port_scan_ratelimit + port_scan_backoff); + delay = min(delay, max); + } + + queue_delayed_work(adapter->work_queue, &adapter->scan_work, delay); +} + void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter) { if (no_auto_port_rescan) return; - queue_work(adapter->work_queue, &adapter->scan_work); + zfcp_fc_port_scan(adapter); } void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter) @@ -44,7 +87,7 @@ void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter) if (!no_auto_port_rescan) return; - queue_work(adapter->work_queue, &adapter->scan_work); + zfcp_fc_port_scan(adapter); } /** @@ -680,12 +723,15 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req, */ void zfcp_fc_scan_ports(struct work_struct *work) { - struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, + struct delayed_work *dw = to_delayed_work(work); + struct zfcp_adapter *adapter = container_of(dw, struct zfcp_adapter, scan_work); int ret, i; struct zfcp_fc_req *fc_req; int chain, max_entries, buf_num, max_bytes; + zfcp_fc_port_scan_time(adapter); + chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1; max_entries = chain ? 
ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index b1d2024ed513..df2b541c8287 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -212,8 +212,6 @@ static inline void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, u8 tm_flags) { - char tag[2]; - int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); if (unlikely(tm_flags)) { @@ -221,17 +219,7 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, return; } - if (scsi_populate_tag_msg(scsi, tag)) { - switch (tag[0]) { - case MSG_ORDERED_TAG: - fcp->fc_pri_ta |= FCP_PTA_ORDERED; - break; - case MSG_SIMPLE_TAG: - fcp->fc_pri_ta |= FCP_PTA_SIMPLE; - break; - }; - } else - fcp->fc_pri_ta = FCP_PTA_SIMPLE; + fcp->fc_pri_ta = FCP_PTA_SIMPLE; if (scsi->sc_data_direction == DMA_FROM_DEVICE) fcp->fc_flags |= FCP_CFL_RDDATA; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 0fe8d5d95119..21ec5e2f584c 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1396,8 +1396,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) port->handle = header->port_handle; atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_COMMON_ACCESS_BOXED, + atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, &port->status); /* check whether D_ID has changed during open */ /* diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 7b353647cb90..75f4bfc2b98a 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -32,25 +32,6 @@ static bool allow_lun_scan = 1; module_param(allow_lun_scan, bool, 0600); MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs"); -static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, - int reason) -{ - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - case SCSI_QDEPTH_QFULL: - scsi_track_queue_full(sdev, depth); - break; - case SCSI_QDEPTH_RAMP_UP: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - default: - return -EOPNOTSUPP; - } - return sdev->queue_depth; -} - static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); @@ -66,9 +47,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) static int zfcp_scsi_slave_configure(struct scsi_device *sdp) { if (sdp->tagged_supported) - scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth); - else - scsi_adjust_queue_depth(sdp, 0, 1); + scsi_change_queue_depth(sdp, default_depth); return 0; } @@ -307,7 +286,7 @@ static struct scsi_host_template zfcp_scsi_host_template = { .slave_alloc = zfcp_scsi_slave_alloc, .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, - .change_queue_depth = zfcp_scsi_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .proc_name = "zfcp", .can_queue = 4096, .this_id = -1, @@ -322,6 +301,7 @@ static struct scsi_host_template zfcp_scsi_host_template = { .use_clustering = 1, .shost_attrs = zfcp_sysfs_shost_attrs, .sdev_attrs = zfcp_sysfs_sdev_attrs, + .track_queue_depth = 1, }; /** diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 672b57219e11..96a0be13e841 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ 
b/drivers/s390/scsi/zfcp_sysfs.c @@ -73,9 +73,7 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n", ZFCP_DEFINE_ATTR(zfcp_port, port, in_recovery, "%d\n", (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) != 0); -ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n", - (atomic_read(&port->status) & - ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); +ZFCP_DEFINE_ATTR_CONST(port, access_denied, "%d\n", 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", zfcp_unit_sdev_status(unit)); @@ -223,9 +221,13 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, if (!adapter) return -ENODEV; - /* sync the user-space- with the kernel-invocation of scan_work */ - queue_work(adapter->work_queue, &adapter->scan_work); - flush_work(&adapter->scan_work); + /* + * Users wish is our command: immediately schedule and flush a + * worker to conduct a synchronous port scan, that is, neither + * a random delay nor a rate limit is applied here. + */ + queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0); + flush_delayed_work(&adapter->scan_work); zfcp_ccw_adapter_put(adapter); return (ssize_t) count; @@ -439,16 +441,15 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ - struct zfcp_port *port = zfcp_sdev->port; \ \ return sprintf(buf, _format, _value); \ } \ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", - dev_name(&port->adapter->ccw_device->dev)); + dev_name(&zfcp_sdev->port->adapter->ccw_device->dev)); ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", - (unsigned long long) port->wwpn); + (unsigned long long) zfcp_sdev->port->wwpn); static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, struct device_attribute *attr, @@ -460,6 +461,49 @@ static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, } static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); +ZFCP_DEFINE_SCSI_ATTR(zfcp_access_denied, "%d\n", + (atomic_read(&zfcp_sdev->status) & + ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); + +static ssize_t zfcp_sysfs_scsi_zfcp_failed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + unsigned int status = atomic_read(&sdev_to_zfcp(sdev)->status); + unsigned int failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 
1 : 0; + + return sprintf(buf, "%d\n", failed); +} + +static ssize_t zfcp_sysfs_scsi_zfcp_failed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + unsigned long val; + + if (kstrtoul(buf, 0, &val) || val != 0) + return -EINVAL; + + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, + "syufai3"); + zfcp_erp_wait(sdev_to_zfcp(sdev)->port->adapter); + + return count; +} +static DEVICE_ATTR(zfcp_failed, S_IWUSR | S_IRUGO, + zfcp_sysfs_scsi_zfcp_failed_show, + zfcp_sysfs_scsi_zfcp_failed_store); + +ZFCP_DEFINE_SCSI_ATTR(zfcp_in_recovery, "%d\n", + (atomic_read(&zfcp_sdev->status) & + ZFCP_STATUS_COMMON_ERP_INUSE) != 0); + +ZFCP_DEFINE_SCSI_ATTR(zfcp_status, "0x%08x\n", + atomic_read(&zfcp_sdev->status)); + struct device_attribute *zfcp_sysfs_sdev_attrs[] = { &dev_attr_fcp_lun, &dev_attr_wwpn, @@ -467,6 +511,10 @@ struct device_attribute *zfcp_sysfs_sdev_attrs[] = { &dev_attr_read_latency, &dev_attr_write_latency, &dev_attr_cmd_latency, + &dev_attr_zfcp_access_denied, + &dev_attr_zfcp_failed, + &dev_attr_zfcp_in_recovery, + &dev_attr_zfcp_status, NULL }; diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 0a7325361d29..cd4129ff7ae4 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -189,19 +189,6 @@ static ssize_t twa_show_stats(struct device *dev, return len; } /* End twa_show_stats() */ -/* This function will set a devices queue depth */ -static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth, - int reason) -{ - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - - if (queue_depth > TW_Q_LENGTH-2) - queue_depth = TW_Q_LENGTH-2; - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); - return queue_depth; -} /* End twa_change_queue_depth() */ - /* Create sysfs 'stats' entry */ static struct device_attribute twa_host_stats_attr = { .attr = { @@ -2016,7 +2003,7 @@ static struct scsi_host_template driver_template = { .queuecommand = twa_scsi_queue, .eh_host_reset_handler = twa_scsi_eh_reset, .bios_param = twa_scsi_biosparam, - .change_queue_depth = twa_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .can_queue = TW_Q_LENGTH-2, .slave_configure = twa_slave_configure, .this_id = -1, diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 6da6cec9a651..2361772d5909 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -191,19 +191,6 @@ static ssize_t twl_show_stats(struct device *dev, return len; } /* End twl_show_stats() */ -/* This function will set a devices queue depth */ -static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth, - int reason) -{ - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - - if (queue_depth > TW_Q_LENGTH-2) - queue_depth = TW_Q_LENGTH-2; - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); - return queue_depth; -} /* End twl_change_queue_depth() */ - /* stats sysfs attribute initializer */ static struct device_attribute twl_host_stats_attr = { .attr = { @@ -1590,7 +1577,7 @@ static struct scsi_host_template driver_template = { .queuecommand = twl_scsi_queue, .eh_host_reset_handler = twl_scsi_eh_reset, .bios_param = twl_scsi_biosparam, - .change_queue_depth = twl_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .can_queue = TW_Q_LENGTH-2, .slave_configure = twl_slave_configure, .this_id = -1, diff --git a/drivers/scsi/3w-xxxx.c 
b/drivers/scsi/3w-xxxx.c index 752624e6bc00..c75f2048319f 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -523,19 +523,6 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr, return len; } /* End tw_show_stats() */ -/* This function will set a devices queue depth */ -static int tw_change_queue_depth(struct scsi_device *sdev, int queue_depth, - int reason) -{ - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - - if (queue_depth > TW_Q_LENGTH-2) - queue_depth = TW_Q_LENGTH-2; - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); - return queue_depth; -} /* End tw_change_queue_depth() */ - /* Create sysfs 'stats' entry */ static struct device_attribute tw_host_stats_attr = { .attr = { @@ -2270,7 +2257,7 @@ static struct scsi_host_template driver_template = { .queuecommand = tw_scsi_queue, .eh_host_reset_handler = tw_scsi_eh_reset, .bios_param = tw_scsi_biosparam, - .change_queue_depth = tw_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .can_queue = TW_Q_LENGTH-2, .slave_configure = tw_slave_configure, .this_id = -1, diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index fabd4be2c985..aa915da2a5e5 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -175,7 +175,7 @@ STATIC void NCR_700_chip_reset(struct Scsi_Host *host); STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt); STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); -static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth, int reason); +static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth); STATIC struct device_attribute *NCR_700_dev_attrs[]; @@ -327,6 +327,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, tpnt->slave_alloc = NCR_700_slave_alloc; tpnt->change_queue_depth = NCR_700_change_queue_depth; tpnt->change_queue_type = NCR_700_change_queue_type; + tpnt->use_blk_tags = 1; if(tpnt->name == NULL) tpnt->name = "53c700"; @@ -592,19 +593,14 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata, hostdata->cmd = NULL; if(SCp != NULL) { - struct NCR_700_command_slot *slot = + struct NCR_700_command_slot *slot = (struct NCR_700_command_slot *)SCp->host_scribble; - + dma_unmap_single(hostdata->dev, slot->pCmd, MAX_COMMAND_SIZE, DMA_TO_DEVICE); if (slot->flags == NCR_700_FLAG_AUTOSENSE) { char *cmnd = NCR_700_get_sense_cmnd(SCp->device); -#ifdef NCR_700_DEBUG - printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n", - SCp, SCp->cmnd[7], result); - scsi_print_sense("53c700", SCp); -#endif dma_unmap_single(hostdata->dev, slot->dma_handle, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); /* restore the old result if the request sense was @@ -906,8 +902,10 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata /* we're done negotiating */ NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION); hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); + SCp->device->tagged_supported = 0; - scsi_deactivate_tcq(SCp->device, host->cmd_per_lun); + scsi_change_queue_depth(SCp->device, host->cmd_per_lun); + scsi_set_tag_type(SCp->device, 0); } else { shost_printk(KERN_WARNING, host, "(%d:%d) Unexpected REJECT Message %s\n", @@ -1432,7 +1430,7 @@ NCR_700_start_command(struct scsi_cmnd *SCp) if((hostdata->tag_negotiated & (1<<scmd_id(SCp))) && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != 
REQUEST_SENSE && slot->flags != NCR_700_FLAG_AUTOSENSE)) { - count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]); + count += spi_populate_tag_msg(&hostdata->msgout[count], SCp); } if(hostdata->fast && @@ -1772,7 +1770,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) */ if(NCR_700_get_depth(SCp->device) != 0 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp))) - || !blk_rq_tagged(SCp->request))) { + || !(SCp->flags & SCMD_TAGGED))) { CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n", NCR_700_get_depth(SCp->device)); return SCSI_MLQUEUE_DEVICE_BUSY; @@ -1800,7 +1798,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) printk("53c700: scsi%d, command ", SCp->device->host->host_no); scsi_print_command(SCp); #endif - if(blk_rq_tagged(SCp->request) + if ((SCp->flags & SCMD_TAGGED) && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) { scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n"); @@ -1814,7 +1812,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) * * FIXME: This will royally screw up on multiple LUN devices * */ - if(!blk_rq_tagged(SCp->request) + if (!(SCp->flags & SCMD_TAGGED) && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) { scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n"); hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); @@ -1911,9 +1909,7 @@ NCR_700_abort(struct scsi_cmnd * SCp) { struct NCR_700_command_slot *slot; - scmd_printk(KERN_INFO, SCp, - "New error handler wants to abort command\n\t"); - scsi_print_command(SCp); + scmd_printk(KERN_INFO, SCp, "abort command\n"); slot = (struct NCR_700_command_slot *)SCp->host_scribble; @@ -2056,13 +2052,10 @@ NCR_700_slave_configure(struct scsi_device *SDp) /* to do here: allocate memory; build a queue_full list */ if(SDp->tagged_supported) { - scsi_set_tag_type(SDp, MSG_ORDERED_TAG); - scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS); + scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS); NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION); - } else { - /* initialise to default depth */ - scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun); } + if(hostdata->fast) { /* Find the correct offset and period via domain validation */ if (!spi_initial_dv(SDp->sdev_target)) @@ -2082,16 +2075,11 @@ NCR_700_slave_destroy(struct scsi_device *SDp) } static int -NCR_700_change_queue_depth(struct scsi_device *SDp, int depth, int reason) +NCR_700_change_queue_depth(struct scsi_device *SDp, int depth) { - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (depth > NCR_700_MAX_TAGS) depth = NCR_700_MAX_TAGS; - - scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth); - return depth; + return scsi_change_queue_depth(SDp, depth); } static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type) @@ -2101,8 +2089,6 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type) struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0]; - scsi_set_tag_type(SDp, tag_type); - /* We have a global (per target) flag to track whether TCQ is * enabled, so we'll be turning it off for the entire target here. 
* our tag algorithm will fail if we mix tagged and untagged commands, @@ -2110,15 +2096,16 @@ static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type) if (change_tag) scsi_target_quiesce(SDp->sdev_target); + scsi_set_tag_type(SDp, tag_type); if (!tag_type) { /* shift back to the default unqueued number of commands * (the user can still raise this) */ - scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun); + scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun); hostdata->tag_negotiated &= ~(1 << sdev_id(SDp)); } else { /* Here, we cleared the negotiation flag above, so this * will force the driver to renegotiate */ - scsi_activate_tcq(SDp, SDp->queue_depth); + scsi_change_queue_depth(SDp, SDp->queue_depth); if (change_tag) NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION); } diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 64c75143c89a..8d66a6469e29 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -2327,12 +2327,12 @@ static int blogic_slaveconfig(struct scsi_device *dev) if (qdepth == 0) qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH; adapter->qdepth[tgt_id] = qdepth; - scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, qdepth); + scsi_change_queue_depth(dev, qdepth); } else { adapter->tagq_ok &= ~(1 << tgt_id); qdepth = adapter->untag_qdepth; adapter->qdepth[tgt_id] = qdepth; - scsi_adjust_queue_depth(dev, 0, qdepth); + scsi_change_queue_depth(dev, qdepth); } qdepth = 0; for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 3a820f61ce65..86cf3d671eb9 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1341,13 +1341,15 @@ config SCSI_DC395x To compile this driver as a module, choose M here: the module will be called dc395x. -config SCSI_DC390T - tristate "Tekram DC390(T) and Am53/79C974 SCSI support" +config SCSI_AM53C974 + tristate "Tekram DC390(T) and Am53/79C974 SCSI support (new driver)" depends on PCI && SCSI + select SCSI_SPI_ATTRS ---help--- This driver supports PCI SCSI host adapters based on the Am53C974A chip, e.g. Tekram DC390(T), DawiControl 2974 and some onboard PCscsi/PCnet (Am53/79C974) solutions. + This is a new implementation base on the generic esp_scsi driver. Documentation can be found in <file:Documentation/scsi/tmscsim.txt>. @@ -1355,7 +1357,7 @@ config SCSI_DC390T based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those. To compile this driver as a module, choose M here: the - module will be called tmscsim. + module will be called am53c974. config SCSI_T128 tristate "Trantor T128/T128F/T228 SCSI support" @@ -1451,6 +1453,14 @@ config SCSI_NSP32 To compile this driver as a module, choose M here: the module will be called nsp32. +config SCSI_WD719X + tristate "Western Digital WD7193/7197/7296 support" + depends on PCI && SCSI + select EEPROM_93CX6 + ---help--- + This is a driver for Western Digital WD7193, WD7197 and WD7296 PCI + SCSI controllers (based on WD33C296A chip). + config SCSI_DEBUG tristate "SCSI debugging host simulator" depends on SCSI @@ -1615,7 +1625,7 @@ config ATARI_SCSI_RESET_BOOT that leave the devices with SCSI operations partway completed. 
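A pattern repeated across the driver hunks above (3w-9xxx, 3w-sas, 3w-xxxx, 53c700, BusLogic, aacraid, advansys, zfcp) is the queue-depth API change: the reason argument disappears from ->change_queue_depth(), calls to scsi_adjust_queue_depth()/scsi_activate_tcq()/scsi_deactivate_tcq() become scsi_change_queue_depth(), and QUEUE_FULL tracking moves to the midlayer for hosts that set .track_queue_depth = 1 (as the zfcp template above now does). For a driver that only needs to clamp the depth, the before/after shape is roughly the sketch below; MY_MAX_TAGS and my_change_queue_depth are placeholders rather than names from the patch:

	/* before: reason-aware callback, tag type passed explicitly */
	static int my_change_queue_depth(struct scsi_device *sdev, int depth,
					 int reason)
	{
		if (reason != SCSI_QDEPTH_DEFAULT)
			return -EOPNOTSUPP;
		if (depth > MY_MAX_TAGS)
			depth = MY_MAX_TAGS;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		return sdev->queue_depth;
	}

	/* after: clamp and delegate to the midlayer helper */
	static int my_change_queue_depth(struct scsi_device *sdev, int depth)
	{
		if (depth > MY_MAX_TAGS)
			depth = MY_MAX_TAGS;
		return scsi_change_queue_depth(sdev, depth);
	}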
config MAC_SCSI - bool "Macintosh NCR5380 SCSI" + tristate "Macintosh NCR5380 SCSI" depends on MAC && SCSI=y select SCSI_SPI_ATTRS help diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 59f1ce6df2d6..58158f11ed7b 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -100,7 +100,7 @@ obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o obj-$(CONFIG_SCSI_7000FASST) += wd7000.o obj-$(CONFIG_SCSI_EATA) += eata.o obj-$(CONFIG_SCSI_DC395x) += dc395x.o -obj-$(CONFIG_SCSI_DC390T) += tmscsim.o +obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/ @@ -143,6 +143,7 @@ obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o obj-$(CONFIG_XEN_SCSI_FRONTEND) += xen-scsifront.o obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o +obj-$(CONFIG_SCSI_WD719X) += wd719x.o obj-$(CONFIG_ARM) += arm/ diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 45da3c823322..36244d63def2 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -11,8 +11,6 @@ * drew@colorado.edu * +1 (303) 666-5836 * - * DISTRIBUTION RELEASE 6. - * * For more information, please consult * * NCR 5380 Family @@ -279,7 +277,7 @@ static void do_reset(struct Scsi_Host *host); * Set up the internal fields in the SCSI command. */ -static __inline__ void initialize_SCp(Scsi_Cmnd * cmd) +static inline void initialize_SCp(struct scsi_cmnd *cmd) { /* * Initialize the Scsi Pointer field so that all of the commands in the @@ -574,12 +572,12 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, int trying_irqs, i, mask; NCR5380_setup(instance); - for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1) + for (trying_irqs = 0, i = 1, mask = 2; i < 16; ++i, mask <<= 1) if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0)) trying_irqs |= mask; timeout = jiffies + (250 * HZ / 1000); - probe_irq = SCSI_IRQ_NONE; + probe_irq = NO_IRQ; /* * A interrupt is triggered whenever BSY = false, SEL = true @@ -596,13 +594,13 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL); - while (probe_irq == SCSI_IRQ_NONE && time_before(jiffies, timeout)) + while (probe_irq == NO_IRQ && time_before(jiffies, timeout)) schedule_timeout_uninterruptible(1); NCR5380_write(SELECT_ENABLE_REG, 0); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - for (i = 0, mask = 1; i < 16; ++i, mask <<= 1) + for (i = 1, mask = 2; i < 16; ++i, mask <<= 1) if (trying_irqs & mask) free_irq(i, NULL); @@ -610,50 +608,70 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, } /** - * NCR58380_print_options - show options - * @instance: unused for now + * NCR58380_info - report driver and host information + * @instance: relevant scsi host instance * - * Called by probe code indicating the NCR5380 driver options that - * were selected. At some point this will switch to runtime options - * read from the adapter in question + * For use as the host template info() handler. 
* * Locks: none */ -static void __init __maybe_unused -NCR5380_print_options(struct Scsi_Host *instance) +static const char *NCR5380_info(struct Scsi_Host *instance) { - printk(" generic options" -#ifdef AUTOPROBE_IRQ - " AUTOPROBE_IRQ" + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + return hostdata->info; +} + +static void prepare_info(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + snprintf(hostdata->info, sizeof(hostdata->info), + "%s, io_port 0x%lx, n_io_port %d, " + "base 0x%lx, irq %d, " + "can_queue %d, cmd_per_lun %d, " + "sg_tablesize %d, this_id %d, " + "flags { %s%s%s}, " +#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG) + "USLEEP_POLL %d, USLEEP_WAITLONG %d, " #endif -#ifdef AUTOSENSE - " AUTOSENSE" + "options { %s} ", + instance->hostt->name, instance->io_port, instance->n_io_port, + instance->base, instance->irq, + instance->can_queue, instance->cmd_per_lun, + instance->sg_tablesize, instance->this_id, + hostdata->flags & FLAG_NCR53C400 ? "NCR53C400 " : "", + hostdata->flags & FLAG_DTC3181E ? "DTC3181E " : "", + hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "", +#if defined(USLEEP_POLL) && defined(USLEEP_WAITLONG) + USLEEP_POLL, USLEEP_WAITLONG, +#endif +#ifdef AUTOPROBE_IRQ + "AUTOPROBE_IRQ " #endif #ifdef DIFFERENTIAL - " DIFFERENTIAL" + "DIFFERENTIAL " #endif #ifdef REAL_DMA - " REAL DMA" + "REAL_DMA " #endif #ifdef REAL_DMA_POLL - " REAL DMA POLL" + "REAL_DMA_POLL " #endif #ifdef PARITY - " PARITY" + "PARITY " #endif #ifdef PSEUDO_DMA - " PSEUDO DMA" + "PSEUDO_DMA " #endif #ifdef UNSAFE - " UNSAFE " + "UNSAFE " #endif - ); - printk(" USLEEP, USLEEP_POLL=%d USLEEP_SLEEP=%d", USLEEP_POLL, USLEEP_SLEEP); - printk(" generic release=%d", NCR5380_PUBLIC_RELEASE); - if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400) { - printk(" ncr53c400 release=%d", NCR53C400_PUBLIC_RELEASE); - } +#ifdef NCR53C400 + "NCR53C400 " +#endif + ""); } /** @@ -672,6 +690,7 @@ static void NCR5380_print_status(struct Scsi_Host *instance) NCR5380_dprint_phase(NDEBUG_ANY, instance); } +#ifdef PSEUDO_DMA /******************************************/ /* * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED] @@ -689,19 +708,18 @@ static void NCR5380_print_status(struct Scsi_Host *instance) static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, char *buffer, int length) { -#ifdef DTC_PUBLIC_RELEASE - dtc_wmaxi = dtc_maxi = 0; -#endif -#ifdef PAS16_PUBLIC_RELEASE - pas_wmaxi = pas_maxi = 0; -#endif - return (-ENOSYS); /* Currently this is a no-op */ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + hostdata->spin_max_r = 0; + hostdata->spin_max_w = 0; + return 0; } +#endif #undef SPRINTF #define SPRINTF(args...) seq_printf(m, ## args) static -void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m); +void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m); static void lprint_command(unsigned char *cmd, struct seq_file *m); static @@ -711,56 +729,31 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata; - Scsi_Cmnd *ptr; + struct scsi_cmnd *ptr; hostdata = (struct NCR5380_hostdata *) instance->hostdata; - SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE); - if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400) - SPRINTF("ncr53c400 release=%d. 
", NCR53C400_PUBLIC_RELEASE); -#ifdef DTC_PUBLIC_RELEASE - SPRINTF("DTC 3180/3280 release %d", DTC_PUBLIC_RELEASE); -#endif -#ifdef T128_PUBLIC_RELEASE - SPRINTF("T128 release %d", T128_PUBLIC_RELEASE); -#endif -#ifdef GENERIC_NCR5380_PUBLIC_RELEASE - SPRINTF("Generic5380 release %d", GENERIC_NCR5380_PUBLIC_RELEASE); -#endif -#ifdef PAS16_PUBLIC_RELEASE - SPRINTF("PAS16 release=%d", PAS16_PUBLIC_RELEASE); -#endif - - SPRINTF("\nBase Addr: 0x%05lX ", (long) instance->base); - SPRINTF("io_port: %04x ", (int) instance->io_port); - if (instance->irq == SCSI_IRQ_NONE) - SPRINTF("IRQ: None.\n"); - else - SPRINTF("IRQ: %d.\n", instance->irq); - -#ifdef DTC_PUBLIC_RELEASE - SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", dtc_wmaxi, dtc_maxi); -#endif -#ifdef PAS16_PUBLIC_RELEASE - SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", pas_wmaxi, pas_maxi); +#ifdef PSEUDO_DMA + SPRINTF("Highwater I/O busy spin counts: write %d, read %d\n", + hostdata->spin_max_w, hostdata->spin_max_r); #endif spin_lock_irq(instance->host_lock); if (!hostdata->connected) SPRINTF("scsi%d: no currently connected command\n", instance->host_no); else - lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); + lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m); SPRINTF("scsi%d: issue_queue\n", instance->host_no); - for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) + for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) lprint_Scsi_Cmnd(ptr, m); SPRINTF("scsi%d: disconnected_queue\n", instance->host_no); - for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) + for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) lprint_Scsi_Cmnd(ptr, m); spin_unlock_irq(instance->host_lock); return 0; } -static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m) +static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) { SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); SPRINTF(" command = "); @@ -836,18 +829,6 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); -#ifdef NCR5380_STATS - for (i = 0; i < 8; ++i) { - hostdata->time_read[i] = 0; - hostdata->time_write[i] = 0; - hostdata->bytes_read[i] = 0; - hostdata->bytes_write[i] = 0; - } - hostdata->timebase = 0; - hostdata->pendingw = 0; - hostdata->pendingr = 0; -#endif - /* The CHECK code seems to break the 53C400. 
Will check it later maybe */ if (flags & FLAG_NCR53C400) hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags; @@ -857,11 +838,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) hostdata->host = instance; hostdata->time_expires = 0; -#ifndef AUTOSENSE - if ((instance->cmd_per_lun > 1) || instance->can_queue > 1) - printk(KERN_WARNING "scsi%d : WARNING : support for multiple outstanding commands enabled\n" " without AUTOSENSE option, contingent allegiance conditions may\n" - " be incorrectly cleared.\n", instance->host_no); -#endif /* def AUTOSENSE */ + prepare_info(instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(MODE_REG, MR_BASE); @@ -935,11 +912,11 @@ static void NCR5380_exit(struct Scsi_Host *instance) * Locks: host lock taken by caller */ -static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) +static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *)) { struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - Scsi_Cmnd *tmp; + struct scsi_cmnd *tmp; #if (NDEBUG & NDEBUG_NO_WRITE) switch (cmd->cmnd[0]) { @@ -952,25 +929,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *) } #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ -#ifdef NCR5380_STATS - switch (cmd->cmnd[0]) { - case WRITE: - case WRITE_6: - case WRITE_10: - hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); - hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd); - hostdata->pendingw++; - break; - case READ: - case READ_6: - case READ_10: - hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); - hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd); - hostdata->pendingr++; - break; - } -#endif - /* * We use the host_scribble field as a pointer to the next command * in a queue @@ -992,7 +950,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *) cmd->host_scribble = (unsigned char *) hostdata->issue_queue; hostdata->issue_queue = cmd; } else { - for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble); + for (tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (struct scsi_cmnd *) tmp->host_scribble); LIST(cmd, tmp); tmp->host_scribble = (unsigned char *) cmd; } @@ -1023,7 +981,7 @@ static void NCR5380_main(struct work_struct *work) struct NCR5380_hostdata *hostdata = container_of(work, struct NCR5380_hostdata, coroutine.work); struct Scsi_Host *instance = hostdata->host; - Scsi_Cmnd *tmp, *prev; + struct scsi_cmnd *tmp, *prev; int done; spin_lock_irq(instance->host_lock); @@ -1036,7 +994,7 @@ static void NCR5380_main(struct work_struct *work) * Search through the issue_queue for a command destined * for a target that's not busy. 
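A convention worth noting for the hunks in this file: the issue and disconnected queues are intrusive singly linked lists threaded through scsi_cmnd->host_scribble (an unsigned char pointer, hence the casts), and the loop that follows simply walks that chain. As a standalone illustration of the convention, not code from the patch, appending to such a queue looks roughly like this:

	#include <scsi/scsi_cmnd.h>

	/* Simplified: the driver's real queue fields are volatile and the
	 * tail walk is open-coded in NCR5380_queue_command_lck() above.
	 */
	static void queue_tail(struct scsi_cmnd **queue, struct scsi_cmnd *cmd)
	{
		struct scsi_cmnd *tmp;

		cmd->host_scribble = NULL;
		if (!*queue) {
			*queue = cmd;
			return;
		}
		for (tmp = *queue; tmp->host_scribble;
		     tmp = (struct scsi_cmnd *)tmp->host_scribble)
			;
		tmp->host_scribble = (unsigned char *)cmd;
	}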
*/ - for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) + for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble) { if (prev != tmp) dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); @@ -1048,7 +1006,7 @@ static void NCR5380_main(struct work_struct *work) prev->host_scribble = tmp->host_scribble; } else { REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble); - hostdata->issue_queue = (Scsi_Cmnd *) tmp->host_scribble; + hostdata->issue_queue = (struct scsi_cmnd *) tmp->host_scribble; } tmp->host_scribble = NULL; @@ -1073,14 +1031,14 @@ static void NCR5380_main(struct work_struct *work) hostdata->selecting = NULL; /* RvC: have to preset this to indicate a new command is being performed */ - if (!NCR5380_select(instance, tmp, - /* - * REQUEST SENSE commands are issued without tagged - * queueing, even on SCSI-II devices because the - * contingent allegiance condition exists for the - * entire unit. - */ - (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) { + /* + * REQUEST SENSE commands are issued without tagged + * queueing, even on SCSI-II devices because the + * contingent allegiance condition exists for the + * entire unit. + */ + + if (!NCR5380_select(instance, tmp)) { break; } else { LIST(tmp, hostdata->issue_queue); @@ -1095,9 +1053,9 @@ static void NCR5380_main(struct work_struct *work) /* exited locked */ } /* if (!hostdata->connected) */ if (hostdata->selecting) { - tmp = (Scsi_Cmnd *) hostdata->selecting; + tmp = (struct scsi_cmnd *) hostdata->selecting; /* Selection will drop and retake the lock */ - if (!NCR5380_select(instance, tmp, (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) { + if (!NCR5380_select(instance, tmp)) { /* Ok ?? */ } else { /* RvC: device failed, so we wait a long time @@ -1216,47 +1174,16 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) #endif -/** - * collect_stats - collect stats on a scsi command - * @hostdata: adapter - * @cmd: command being issued - * - * Update the statistical data by parsing the command in question - */ - -static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd) -{ -#ifdef NCR5380_STATS - switch (cmd->cmnd[0]) { - case WRITE: - case WRITE_6: - case WRITE_10: - hostdata->time_write[scmd_id(cmd)] += (jiffies - hostdata->timebase); - hostdata->pendingw--; - break; - case READ: - case READ_6: - case READ_10: - hostdata->time_read[scmd_id(cmd)] += (jiffies - hostdata->timebase); - hostdata->pendingr--; - break; - } -#endif -} - - /* - * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, - * int tag); + * Function : int NCR5380_select(struct Scsi_Host *instance, + * struct scsi_cmnd *cmd) * * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, * including ARBITRATION, SELECTION, and initial message out for * IDENTIFY and queue messages. * * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for - * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for - * the command that is presently connected. + * target lives, cmd - SCSI command to execute. 
* * Returns : -1 if selection could not execute for some reason, * 0 if selection succeeded or failed because the target @@ -1278,7 +1205,7 @@ static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd) * Locks: caller holds hostdata lock in IRQ mode */ -static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) +static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) { NCR5380_local_declare(); struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; @@ -1476,7 +1403,6 @@ part2: return -1; } cmd->result = DID_BAD_TARGET << 16; - collect_stats(hostdata, cmd); cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no); @@ -1513,7 +1439,7 @@ part2: } dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id); - tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun); + tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun); len = 1; cmd->tag = 0; @@ -2086,7 +2012,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { #endif unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; - Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; + struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; /* RvC: we need to set the end of the polling time */ unsigned long poll_time = jiffies + USLEEP_POLL; @@ -2228,7 +2154,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); - collect_stats(hostdata, cmd); cmd->scsi_done(cmd); cmd = hostdata->connected; break; @@ -2263,7 +2188,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { else if (status_byte(cmd->SCp.Status) != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); -#ifdef AUTOSENSE if ((cmd->cmnd[0] == REQUEST_SENSE) && hostdata->ses.cmd_len) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); @@ -2278,12 +2202,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { LIST(cmd, hostdata->issue_queue); cmd->host_scribble = (unsigned char *) hostdata->issue_queue; - hostdata->issue_queue = (Scsi_Cmnd *) cmd; + hostdata->issue_queue = (struct scsi_cmnd *) cmd; dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no); - } else -#endif /* def AUTOSENSE */ - { - collect_stats(hostdata, cmd); + } else { cmd->scsi_done(cmd); } @@ -2430,7 +2351,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF)); hostdata->connected = NULL; cmd->result = DID_ERROR << 16; - collect_stats(hostdata, cmd); cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return; @@ -2479,7 +2399,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { * Function : void NCR5380_reselect (struct Scsi_Host *instance) * * Purpose : does reselection, initializing the instance->connected - * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q + * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q * nexus has been reestablished, * * Inputs : 
instance - this instance of the NCR5380. @@ -2496,7 +2416,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { int len; unsigned char msg[3]; unsigned char *data; - Scsi_Cmnd *tmp = NULL, *prev; + struct scsi_cmnd *tmp = NULL, *prev; int abort = 0; NCR5380_setup(instance); @@ -2562,7 +2482,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { */ - for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) + for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (struct scsi_cmnd *) tmp->host_scribble) if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun) ) { if (prev) { @@ -2570,7 +2490,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { prev->host_scribble = tmp->host_scribble; } else { REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble); - hostdata->disconnected_queue = (Scsi_Cmnd *) tmp->host_scribble; + hostdata->disconnected_queue = (struct scsi_cmnd *) tmp->host_scribble; } tmp->host_scribble = NULL; break; @@ -2601,7 +2521,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { * * Inputs : instance - this instance of the NCR5380. * - * Returns : pointer to the Scsi_Cmnd structure for which the I_T_L + * Returns : pointer to the scsi_cmnd structure for which the I_T_L * nexus has been reestablished, on failure NULL is returned. */ @@ -2643,32 +2563,32 @@ static void NCR5380_dma_complete(NCR5380_instance * instance) { #endif /* def REAL_DMA */ /* - * Function : int NCR5380_abort (Scsi_Cmnd *cmd) + * Function : int NCR5380_abort (struct scsi_cmnd *cmd) * * Purpose : abort a command * - * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the - * host byte of the result field to, if zero DID_ABORTED is + * Inputs : cmd - the scsi_cmnd to abort, code - code to set the + * host byte of the result field to, if zero DID_ABORTED is * used. * - * Returns : 0 - success, -1 on failure. + * Returns : SUCCESS - success, FAILED on failure. * - * XXX - there is no way to abort the command that is currently - * connected, you have to wait for it to complete. If this is + * XXX - there is no way to abort the command that is currently + * connected, you have to wait for it to complete. If this is * a problem, we could implement longjmp() / setjmp(), setjmp() * called where the loop started in NCR5380_main(). * * Locks: host lock taken by caller */ -static int NCR5380_abort(Scsi_Cmnd * cmd) { +static int NCR5380_abort(struct scsi_cmnd *cmd) +{ NCR5380_local_declare(); struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - Scsi_Cmnd *tmp, **prev; - - printk(KERN_WARNING "scsi%d : aborting command\n", instance->host_no); - scsi_print_command(cmd); + struct scsi_cmnd *tmp, **prev; + + scmd_printk(KERN_WARNING, cmd, "aborting command\n"); NCR5380_print_status(instance); @@ -2704,7 +2624,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { * aborted flag and get back into our main loop. 
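The abort handler also moves from the driver's old 0/-1 returns to the midlayer's SUCCESS/FAILED codes, matching the updated "Returns" comment above; the corresponding return statement follows just below. For reference, the shape the SCSI error-handling callbacks expect is roughly this sketch, with abort_on_hardware() as a placeholder for the LLDD-specific work:

	/* SUCCESS and FAILED come from <scsi/scsi.h> */
	static int my_eh_abort(struct scsi_cmnd *cmd)
	{
		if (abort_on_hardware(cmd))	/* placeholder */
			return SUCCESS;
		return FAILED;
	}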
*/ - return 0; + return SUCCESS; } #endif @@ -2714,10 +2634,10 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { */ dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no); - for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) + for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble) if (cmd == tmp) { REMOVE(5, *prev, tmp, tmp->host_scribble); - (*prev) = (Scsi_Cmnd *) tmp->host_scribble; + (*prev) = (struct scsi_cmnd *) tmp->host_scribble; tmp->host_scribble = NULL; tmp->result = DID_ABORT << 16; dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no); @@ -2770,20 +2690,20 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { * it from the disconnected queue. */ - for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble) + for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = (struct scsi_cmnd *) tmp->host_scribble) if (cmd == tmp) { dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no); - if (NCR5380_select(instance, cmd, (int) cmd->tag)) + if (NCR5380_select(instance, cmd)) return FAILED; dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no); do_abort(instance); - for (prev = (Scsi_Cmnd **) & (hostdata->disconnected_queue), tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) + for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; prev = (struct scsi_cmnd **) &(tmp->host_scribble), tmp = (struct scsi_cmnd *) tmp->host_scribble) if (cmd == tmp) { REMOVE(5, *prev, tmp, tmp->host_scribble); - *prev = (Scsi_Cmnd *) tmp->host_scribble; + *prev = (struct scsi_cmnd *) tmp->host_scribble; tmp->host_scribble = NULL; tmp->result = DID_ABORT << 16; tmp->scsi_done(tmp); @@ -2806,7 +2726,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { /* - * Function : int NCR5380_bus_reset (Scsi_Cmnd *cmd) + * Function : int NCR5380_bus_reset (struct scsi_cmnd *cmd) * * Purpose : reset the SCSI bus. * @@ -2815,7 +2735,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { * Locks: host lock taken by caller */ -static int NCR5380_bus_reset(Scsi_Cmnd * cmd) +static int NCR5380_bus_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index c79ddfa6f53c..162112dd1bf8 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -7,8 +7,6 @@ * drew@colorado.edu * +1 (303) 666-5836 * - * DISTRIBUTION RELEASE 7 - * * For more information, please consult * * NCR 5380 Family @@ -25,13 +23,7 @@ #define NCR5380_H #include <linux/interrupt.h> - -#ifdef AUTOSENSE #include <scsi/scsi_eh.h> -#endif - -#define NCR5380_PUBLIC_RELEASE 7 -#define NCR53C400_PUBLIC_RELEASE 2 #define NDEBUG_ARBITRATION 0x1 #define NDEBUG_AUTOSENSE 0x2 @@ -224,33 +216,44 @@ #define DISCONNECT_LONG 2 /* - * These are "special" values for the tag parameter passed to NCR5380_select. + * "Special" value for the (unsigned char) command tag, to indicate + * I_T_L nexus instead of I_T_L_Q. 
*/ -#define TAG_NEXT -1 /* Use next free tag */ -#define TAG_NONE -2 /* - * Establish I_T_L nexus instead of I_T_L_Q - * even on SCSI-II devices. - */ +#define TAG_NONE 0xff /* * These are "special" values for the irq and dma_channel fields of the * Scsi_Host structure */ -#define SCSI_IRQ_NONE 255 #define DMA_NONE 255 #define IRQ_AUTO 254 #define DMA_AUTO 254 #define PORT_AUTO 0xffff /* autoprobe io port for 53c400a */ +#ifndef NO_IRQ +#define NO_IRQ 0 +#endif + #define FLAG_HAS_LAST_BYTE_SENT 1 /* NCR53c81 or better */ #define FLAG_CHECK_LAST_BYTE_SENT 2 /* Only test once */ #define FLAG_NCR53C400 4 /* NCR53c400 */ #define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */ #define FLAG_DTC3181E 16 /* DTC3181E */ +#define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */ +#define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */ #ifndef ASM + +#ifdef SUPPORT_TAGS +struct tag_alloc { + DECLARE_BITMAP(allocated, MAX_TAGS); + int nr_allocated; + int queue_size; +}; +#endif + struct NCR5380_hostdata { NCR5380_implementation_fields; /* implementation specific */ struct Scsi_Host *host; /* Host backpointer */ @@ -263,9 +266,9 @@ struct NCR5380_hostdata { volatile int dma_len; /* requested length of DMA */ #endif volatile unsigned char last_message; /* last message OUT */ - volatile Scsi_Cmnd *connected; /* currently connected command */ - volatile Scsi_Cmnd *issue_queue; /* waiting to be issued */ - volatile Scsi_Cmnd *disconnected_queue; /* waiting for reconnect */ + volatile struct scsi_cmnd *connected; /* currently connected command */ + volatile struct scsi_cmnd *issue_queue; /* waiting to be issued */ + volatile struct scsi_cmnd *disconnected_queue; /* waiting for reconnect */ volatile int restart_select; /* we have disconnected, used to restart NCR5380_select() */ @@ -273,19 +276,21 @@ struct NCR5380_hostdata { int flags; unsigned long time_expires; /* in jiffies, set prior to sleeping */ int select_time; /* timer in select for target response */ - volatile Scsi_Cmnd *selecting; + volatile struct scsi_cmnd *selecting; struct delayed_work coroutine; /* our co-routine */ -#ifdef NCR5380_STATS - unsigned timebase; /* Base for time calcs */ - long time_read[8]; /* time to do reads */ - long time_write[8]; /* time to do writes */ - unsigned long bytes_read[8]; /* bytes read */ - unsigned long bytes_write[8]; /* bytes written */ - unsigned pendingr; - unsigned pendingw; -#endif -#ifdef AUTOSENSE struct scsi_eh_save ses; + char info[256]; + int read_overruns; /* number of bytes to cut from a + * transfer to handle chip overruns */ + int retain_dma_intr; + struct work_struct main_task; + volatile int main_running; +#ifdef SUPPORT_TAGS + struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */ +#endif +#ifdef PSEUDO_DMA + unsigned spin_max_r; + unsigned spin_max_w; #endif }; @@ -296,7 +301,8 @@ struct NCR5380_hostdata { #endif #define dprintk(flg, fmt, ...) 
\ - do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0) + do { if ((NDEBUG) & (flg)) \ + printk(KERN_DEBUG fmt, ## __VA_ARGS__); } while (0) #if NDEBUG #define NCR5380_dprint(flg, arg) \ @@ -320,17 +326,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance); static irqreturn_t NCR5380_intr(int irq, void *dev_id); #endif static void NCR5380_main(struct work_struct *work); -static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); -static int NCR5380_abort(Scsi_Cmnd * cmd); -static int NCR5380_bus_reset(Scsi_Cmnd * cmd); -static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); -static int __maybe_unused NCR5380_show_info(struct seq_file *, - struct Scsi_Host *); -static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, - char *buffer, int length); - +static const char *NCR5380_info(struct Scsi_Host *instance); static void NCR5380_reselect(struct Scsi_Host *instance); -static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag); +static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd); #if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL) static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); #endif diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 681434e2dfe9..b32e77db0c48 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2181,7 +2181,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY)) { switch (scsicmd->cmnd[0]) { - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: if (!(dev->raw_io_interface) || !(dev->raw_io_64) || ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) @@ -2309,7 +2309,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); return aac_get_container_name(scsicmd); } - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: if (!(dev->raw_io_interface) || !(dev->raw_io_64) || ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a759cb2d4b15..fdcdf9f781bc 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -462,9 +462,9 @@ static int aac_slave_configure(struct scsi_device *sdev) depth = 256; else if (depth < 2) depth = 2; - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); + scsi_change_queue_depth(sdev, depth); } else - scsi_adjust_queue_depth(sdev, 0, 1); + scsi_change_queue_depth(sdev, 1); return 0; } @@ -478,12 +478,8 @@ static int aac_slave_configure(struct scsi_device *sdev) * total capacity and the queue depth supported by the target device. 
*/ -static int aac_change_queue_depth(struct scsi_device *sdev, int depth, - int reason) +static int aac_change_queue_depth(struct scsi_device *sdev, int depth) { - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (sdev->tagged_supported && (sdev->type == TYPE_DISK) && (sdev_channel(sdev) == CONTAINER_CHANNEL)) { struct scsi_device * dev; @@ -504,10 +500,10 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth, depth = 256; else if (depth < 2) depth = 2; - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); - } else - scsi_adjust_queue_depth(sdev, 0, 1); - return sdev->queue_depth; + return scsi_change_queue_depth(sdev, depth); + } + + return scsi_change_queue_depth(sdev, 1); } static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) @@ -555,7 +551,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) AAC_DRIVERNAME, host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun); switch (cmd->cmnd[0]) { - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: if (!(aac->raw_io_interface) || !(aac->raw_io_64) || ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 43761c1c46f0..6719a3390ebd 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -7706,7 +7706,7 @@ advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc) asc_dvc->cfg->can_tagged_qng |= tid_bit; asc_dvc->use_tagged_qng |= tid_bit; } - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, + scsi_change_queue_depth(sdev, asc_dvc->max_dvc_qng[sdev->id]); } } else { @@ -7714,7 +7714,6 @@ advansys_narrow_slave_configure(struct scsi_device *sdev, ASC_DVC_VAR *asc_dvc) asc_dvc->cfg->can_tagged_qng &= ~tid_bit; asc_dvc->use_tagged_qng &= ~tid_bit; } - scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); } if ((sdev->lun == 0) && @@ -7848,12 +7847,8 @@ advansys_wide_slave_configure(struct scsi_device *sdev, ADV_DVC_VAR *adv_dvc) } } - if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) { - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, - adv_dvc->max_dvc_qng); - } else { - scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); - } + if ((adv_dvc->tagqng_able & tidmask) && sdev->tagged_supported) + scsi_change_queue_depth(sdev, adv_dvc->max_dvc_qng); } /* diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index e77b72f78006..2b960b326daf 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -230,7 +230,7 @@ * * ************************************************************************** - + see Documentation/scsi/aha152x.txt for configuration details **************************************************************************/ @@ -279,45 +279,11 @@ static LIST_HEAD(aha152x_host_list); #error define AUTOCONF or SETUP0 #endif -#if defined(AHA152X_DEBUG) -#define DEBUG_DEFAULT debug_eh - -#define DPRINTK(when,msgs...) 
\ - do { if(HOSTDATA(shpnt)->debug & (when)) printk(msgs); } while(0) - -#define DO_LOCK(flags) \ - do { \ - if(spin_is_locked(&QLOCK)) { \ - DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \ - } \ - DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \ - spin_lock_irqsave(&QLOCK,flags); \ - DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \ - QLOCKER=__func__; \ - QLOCKERL=__LINE__; \ - } while(0) - -#define DO_UNLOCK(flags) \ - do { \ - DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \ - spin_unlock_irqrestore(&QLOCK,flags); \ - DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \ - QLOCKER="(not locked)"; \ - QLOCKERL=0; \ - } while(0) - -#else -#define DPRINTK(when,msgs...) #define DO_LOCK(flags) spin_lock_irqsave(&QLOCK,flags) #define DO_UNLOCK(flags) spin_unlock_irqrestore(&QLOCK,flags) -#endif #define LEAD "(scsi%d:%d:%d) " -#define WARN_LEAD KERN_WARNING LEAD #define INFO_LEAD KERN_INFO LEAD -#define NOTE_LEAD KERN_NOTICE LEAD -#define ERR_LEAD KERN_ERR LEAD -#define DEBUG_LEAD KERN_DEBUG LEAD #define CMDINFO(cmd) \ (cmd) ? ((cmd)->device->host->host_no) : -1, \ (cmd) ? ((cmd)->device->id & 0x0f) : -1, \ @@ -345,10 +311,10 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc) enum { not_issued = 0x0001, /* command not yet issued */ - selecting = 0x0002, /* target is beeing selected */ + selecting = 0x0002, /* target is being selected */ identified = 0x0004, /* IDENTIFY was sent */ disconnected = 0x0008, /* target disconnected */ - completed = 0x0010, /* target sent COMMAND COMPLETE */ + completed = 0x0010, /* target sent COMMAND COMPLETE */ aborted = 0x0020, /* ABORT was sent */ resetted = 0x0040, /* BUS DEVICE RESET was sent */ spiordy = 0x0080, /* waiting for SPIORDY to raise */ @@ -396,7 +362,6 @@ static int exttrans[] = {0, 0}; module_param_array(exttrans, int, NULL, 0); MODULE_PARM_DESC(exttrans,"use extended translation"); -#if !defined(AHA152X_DEBUG) static int aha152x[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; module_param_array(aha152x, int, NULL, 0); MODULE_PARM_DESC(aha152x, "parameters for first controller"); @@ -404,19 +369,6 @@ MODULE_PARM_DESC(aha152x, "parameters for first controller"); static int aha152x1[] = {0, 11, 7, 1, 1, 0, DELAY_DEFAULT, 0}; module_param_array(aha152x1, int, NULL, 0); MODULE_PARM_DESC(aha152x1, "parameters for second controller"); -#else -static int debug[] = {DEBUG_DEFAULT, DEBUG_DEFAULT}; -module_param_array(debug, int, NULL, 0); -MODULE_PARM_DESC(debug, "flags for driver debugging"); - -static int aha152x[] = {0, 11, 7, 1, 1, 1, DELAY_DEFAULT, 0, DEBUG_DEFAULT}; -module_param_array(aha152x, int, NULL, 0); -MODULE_PARM_DESC(aha152x, "parameters for first controller"); - -static int aha152x1[] = {0, 11, 7, 1, 1, 1, DELAY_DEFAULT, 0, DEBUG_DEFAULT}; -module_param_array(aha152x1, int, NULL, 0); -MODULE_PARM_DESC(aha152x1, "parameters for second controller"); -#endif /* !defined(AHA152X_DEBUG) */ #endif /* MODULE */ #ifdef __ISAPNP__ @@ -446,7 +398,7 @@ static struct scsi_host_template aha152x_driver_template; /* * internal states of the host * - */ + */ enum aha152x_state { idle=0, unknown, @@ -485,24 +437,16 @@ struct aha152x_hostdata { spinlock_t lock; /* host lock */ -#if defined(AHA152X_DEBUG) - const char *locker; - /* which function has the 
lock */ - int lockerl; /* where did it get it */ - - int debug; /* current debugging setting */ -#endif - #if defined(AHA152X_STAT) - int total_commands; + int total_commands; int disconnections; int busfree_without_any_action; int busfree_without_old_command; int busfree_without_new_command; int busfree_without_done_command; int busfree_with_check_condition; - int count[maxstate]; - int count_trans[maxstate]; + int count[maxstate]; + int count_trans[maxstate]; unsigned long time[maxstate]; #endif @@ -514,7 +458,7 @@ struct aha152x_hostdata { int delay; /* reset out delay */ int ext_trans; /* extended translation enabled */ - int swint; /* software-interrupt was fired during detect() */ + int swint; /* software-interrupt was fired during detect() */ int service; /* bh needs to be run */ int in_intr; /* bh is running */ @@ -543,7 +487,7 @@ struct aha152x_hostdata { unsigned char msgi[256]; /* received message bytes */ - int msgo_i, msgo_len; + int msgo_i, msgo_len; /* number of sent bytes and length of current messages */ unsigned char msgo[256]; /* pending messages */ @@ -689,7 +633,6 @@ static void aha152x_error(struct Scsi_Host *shpnt, char *msg); static void done(struct Scsi_Host *shpnt, int error); /* diagnostics */ -static void disp_ports(struct Scsi_Host *shpnt); static void show_command(Scsi_Cmnd * ptr); static void show_queues(struct Scsi_Host *shpnt); static void disp_enintr(struct Scsi_Host *shpnt); @@ -812,10 +755,6 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup) DELAY = setup->delay; EXT_TRANS = setup->ext_trans; -#if defined(AHA152X_DEBUG) - HOSTDATA(shpnt)->debug = setup->debug; -#endif - SETPORT(SCSIID, setup->scsiid << 4); shpnt->this_id = setup->scsiid; @@ -941,31 +880,24 @@ void aha152x_release(struct Scsi_Host *shpnt) * setup controller to generate interrupts depending * on current state (lock has to be acquired) * - */ + */ static int setup_expected_interrupts(struct Scsi_Host *shpnt) { if(CURRENT_SC) { CURRENT_SC->SCp.phase |= 1 << 16; - + if(CURRENT_SC->SCp.phase & selecting) { - DPRINTK(debug_intr, DEBUG_LEAD "expecting: (seldo) (seltimo) (seldi)\n", CMDINFO(CURRENT_SC)); SETPORT(SSTAT1, SELTO); SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0)); SETPORT(SIMODE1, ENSELTIMO); } else { - DPRINTK(debug_intr, DEBUG_LEAD "expecting: (phase change) (busfree) %s\n", CMDINFO(CURRENT_SC), CURRENT_SC->SCp.phase & spiordy ? "(spiordy)" : ""); SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0); - SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); + SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); } } else if(STATE==seldi) { - DPRINTK(debug_intr, DEBUG_LEAD "expecting: (phase change) (identify)\n", CMDINFO(CURRENT_SC)); SETPORT(SIMODE0, 0); - SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); + SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE); } else { - DPRINTK(debug_intr, DEBUG_LEAD "expecting: %s %s\n", - CMDINFO(CURRENT_SC), - DISCONNECTED_SC ? "(reselection)" : "", - ISSUE_SC ? "(busfree)" : ""); SETPORT(SIMODE0, DISCONNECTED_SC ? ENSELDI : 0); SETPORT(SIMODE1, ENSCSIRST | ( (ISSUE_SC||DONE_SC) ? ENBUSFREE : 0)); } @@ -977,7 +909,7 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt) } -/* +/* * Queue a command and setup interrupts for a free bus. 
*/ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, @@ -986,15 +918,6 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, struct Scsi_Host *shpnt = SCpnt->device->host; unsigned long flags; -#if defined(AHA152X_DEBUG) - if (HOSTDATA(shpnt)->debug & debug_queue) { - printk(INFO_LEAD "queue: %p; cmd_len=%d pieces=%d size=%u cmnd=", - CMDINFO(SCpnt), SCpnt, SCpnt->cmd_len, - scsi_sg_count(SCpnt), scsi_bufflen(SCpnt)); - __scsi_print_command(SCpnt->cmnd); - } -#endif - SCpnt->scsi_done = done; SCpnt->SCp.phase = not_issued | phase; SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */ @@ -1004,13 +927,13 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, if(SCpnt->SCp.phase & (resetting|check_condition)) { if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) { - printk(ERR_LEAD "cannot reuse command\n", CMDINFO(SCpnt)); + scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n"); return FAILED; } } else { SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC); if(!SCpnt->host_scribble) { - printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt)); + scmd_printk(KERN_ERR, SCpnt, "allocation failed\n"); return FAILED; } } @@ -1066,15 +989,6 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, */ static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) { -#if 0 - if(*SCpnt->cmnd == REQUEST_SENSE) { - SCpnt->result = 0; - done(SCpnt); - - return 0; - } -#endif - return aha152x_internal_queue(SCpnt, NULL, 0, done); } @@ -1082,15 +996,10 @@ static DEF_SCSI_QCMD(aha152x_queue) /* - * * */ static void reset_done(Scsi_Cmnd *SCpnt) { -#if 0 - struct Scsi_Host *shpnt = SCpnt->host; - DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt)); -#endif if(SCSEM(SCpnt)) { complete(SCSEM(SCpnt)); } else { @@ -1108,20 +1017,11 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt) Scsi_Cmnd *ptr; unsigned long flags; -#if defined(AHA152X_DEBUG) - if(HOSTDATA(shpnt)->debug & debug_eh) { - printk(DEBUG_LEAD "abort(%p)", CMDINFO(SCpnt), SCpnt); - show_queues(shpnt); - } -#endif - DO_LOCK(flags); ptr=remove_SC(&ISSUE_SC, SCpnt); if(ptr) { - DPRINTK(debug_eh, DEBUG_LEAD "not yet issued - SUCCESS\n", CMDINFO(SCpnt)); - HOSTDATA(shpnt)->commands--; if (!HOSTDATA(shpnt)->commands) SETPORT(PORTA, 0); @@ -1131,7 +1031,7 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt) SCpnt->host_scribble=NULL; return SUCCESS; - } + } DO_UNLOCK(flags); @@ -1142,7 +1042,8 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt) * */ - printk(ERR_LEAD "cannot abort running or disconnected command\n", CMDINFO(SCpnt)); + scmd_printk(KERN_ERR, SCpnt, + "cannot abort running or disconnected command\n"); return FAILED; } @@ -1160,15 +1061,8 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt) unsigned long flags; unsigned long timeleft; -#if defined(AHA152X_DEBUG) - if(HOSTDATA(shpnt)->debug & debug_eh) { - printk(INFO_LEAD "aha152x_device_reset(%p)", CMDINFO(SCpnt), SCpnt); - show_queues(shpnt); - } -#endif - if(CURRENT_SC==SCpnt) { - printk(ERR_LEAD "cannot reset current device\n", CMDINFO(SCpnt)); + scmd_printk(KERN_ERR, SCpnt, "cannot reset current device\n"); return FAILED; } @@ -1208,7 +1102,7 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt) } else if(disconnected) { append_SC(&DISCONNECTED_SC, SCpnt); } - + ret = FAILED; } @@ -1227,12 +1121,12 @@ static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs) if(SCDATA(ptr)) { next = SCNEXT(ptr); } else { - 
printk(DEBUG_LEAD "queue corrupted at %p\n", CMDINFO(ptr), ptr); + scmd_printk(KERN_DEBUG, ptr, + "queue corrupted at %p\n", ptr); next = NULL; } if (!ptr->device->soft_reset) { - DPRINTK(debug_eh, DEBUG_LEAD "disconnected command %p removed\n", CMDINFO(ptr), ptr); remove_SC(SCs, ptr); HOSTDATA(shpnt)->commands--; kfree(ptr->host_scribble); @@ -1253,25 +1147,14 @@ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt) DO_LOCK(flags); -#if defined(AHA152X_DEBUG) - if(HOSTDATA(shpnt)->debug & debug_eh) { - printk(KERN_DEBUG "scsi%d: bus reset", shpnt->host_no); - show_queues(shpnt); - } -#endif - free_hard_reset_SCs(shpnt, &ISSUE_SC); free_hard_reset_SCs(shpnt, &DISCONNECTED_SC); - DPRINTK(debug_eh, KERN_DEBUG "scsi%d: resetting bus\n", shpnt->host_no); - SETPORT(SCSISEQ, SCSIRSTO); mdelay(256); SETPORT(SCSISEQ, 0); mdelay(DELAY); - DPRINTK(debug_eh, KERN_DEBUG "scsi%d: bus resetted\n", shpnt->host_no); - setup_expected_interrupts(shpnt); if(HOSTDATA(shpnt)->commands==0) SETPORT(PORTA, 0); @@ -1333,11 +1216,7 @@ static void reset_ports(struct Scsi_Host *shpnt) */ int aha152x_host_reset_host(struct Scsi_Host *shpnt) { - DPRINTK(debug_eh, KERN_DEBUG "scsi%d: host reset\n", shpnt->host_no); - aha152x_bus_reset_host(shpnt); - - DPRINTK(debug_eh, KERN_DEBUG "scsi%d: resetting ports\n", shpnt->host_no); reset_ports(shpnt); return SUCCESS; @@ -1345,7 +1224,7 @@ int aha152x_host_reset_host(struct Scsi_Host *shpnt) /* * Reset the host (bus and controller) - * + * */ static int aha152x_host_reset(Scsi_Cmnd *SCpnt) { @@ -1411,7 +1290,9 @@ static void done(struct Scsi_Host *shpnt, int error) { if (CURRENT_SC) { if(DONE_SC) - printk(ERR_LEAD "there's already a completed command %p - will cause abort\n", CMDINFO(CURRENT_SC), DONE_SC); + scmd_printk(KERN_ERR, CURRENT_SC, + "there's already a completed command %p " + "- will cause abort\n", DONE_SC); DONE_SC = CURRENT_SC; CURRENT_SC = NULL; @@ -1466,7 +1347,7 @@ static irqreturn_t intr(int irqno, void *dev_id) return IRQ_NONE; if( TESTLO(DMASTAT, INTSTAT) ) - return IRQ_NONE; + return IRQ_NONE; /* no more interrupts from the controller, while we're busy. 
INTEN is restored by the BH handler */ @@ -1501,7 +1382,7 @@ static void busfree_run(struct Scsi_Host *shpnt) SETPORT(SXFRCTL0, CH1); SETPORT(SSTAT1, CLRBUSFREE); - + if(CURRENT_SC) { #if defined(AHA152X_STAT) action++; @@ -1513,19 +1394,13 @@ static void busfree_run(struct Scsi_Host *shpnt) done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16)); } else if(CURRENT_SC->SCp.phase & aborted) { - DPRINTK(debug_eh, DEBUG_LEAD "ABORT sent\n", CMDINFO(CURRENT_SC)); done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16)); } else if(CURRENT_SC->SCp.phase & resetted) { - DPRINTK(debug_eh, DEBUG_LEAD "BUS DEVICE RESET sent\n", CMDINFO(CURRENT_SC)); done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16)); } else if(CURRENT_SC->SCp.phase & disconnected) { /* target sent DISCONNECT */ - DPRINTK(debug_selection, DEBUG_LEAD "target disconnected at %d/%d\n", - CMDINFO(CURRENT_SC), - scsi_get_resid(CURRENT_SC), - scsi_bufflen(CURRENT_SC)); #if defined(AHA152X_STAT) HOSTDATA(shpnt)->disconnections++; #endif @@ -1553,13 +1428,6 @@ static void busfree_run(struct Scsi_Host *shpnt) struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC; struct aha152x_scdata *sc = SCDATA(cmd); -#if 0 - if(HOSTDATA(shpnt)->debug & debug_eh) { - printk(ERR_LEAD "received sense: ", CMDINFO(DONE_SC)); - scsi_print_sense("bh", DONE_SC); - } -#endif - scsi_eh_restore_cmnd(cmd, &sc->ses); cmd->SCp.Status = SAM_STAT_CHECK_CONDITION; @@ -1571,17 +1439,11 @@ static void busfree_run(struct Scsi_Host *shpnt) #if defined(AHA152X_STAT) HOSTDATA(shpnt)->busfree_with_check_condition++; #endif -#if 0 - DPRINTK(debug_eh, ERR_LEAD "CHECK CONDITION found\n", CMDINFO(DONE_SC)); -#endif if(!(DONE_SC->SCp.phase & not_issued)) { struct aha152x_scdata *sc; Scsi_Cmnd *ptr = DONE_SC; DONE_SC=NULL; -#if 0 - DPRINTK(debug_eh, ERR_LEAD "requesting sense\n", CMDINFO(ptr)); -#endif sc = SCDATA(ptr); /* It was allocated in aha152x_internal_queue? 
*/ @@ -1591,19 +1453,10 @@ static void busfree_run(struct Scsi_Host *shpnt) DO_UNLOCK(flags); aha152x_internal_queue(ptr, NULL, check_condition, ptr->scsi_done); DO_LOCK(flags); -#if 0 - } else { - DPRINTK(debug_eh, ERR_LEAD "command not issued - CHECK CONDITION ignored\n", CMDINFO(DONE_SC)); -#endif } } if(DONE_SC && DONE_SC->scsi_done) { -#if defined(AHA152X_DEBUG) - int hostno=DONE_SC->device->host->host_no; - int id=DONE_SC->device->id & 0xf; - int lun=((u8)DONE_SC->device->lun) & 0x7; -#endif Scsi_Cmnd *ptr = DONE_SC; DONE_SC=NULL; @@ -1618,9 +1471,7 @@ static void busfree_run(struct Scsi_Host *shpnt) } DO_UNLOCK(flags); - DPRINTK(debug_done, DEBUG_LEAD "calling scsi_done(%p)\n", hostno, id, lun, ptr); - ptr->scsi_done(ptr); - DPRINTK(debug_done, DEBUG_LEAD "scsi_done(%p) returned\n", hostno, id, lun, ptr); + ptr->scsi_done(ptr); DO_LOCK(flags); } @@ -1640,9 +1491,7 @@ static void busfree_run(struct Scsi_Host *shpnt) #if defined(AHA152X_STAT) action++; #endif - CURRENT_SC->SCp.phase |= selecting; - - DPRINTK(debug_selection, DEBUG_LEAD "selecting target\n", CMDINFO(CURRENT_SC)); + CURRENT_SC->SCp.phase |= selecting; /* clear selection timeout */ SETPORT(SSTAT1, SELTO); @@ -1674,18 +1523,19 @@ static void seldo_run(struct Scsi_Host *shpnt) SETPORT(SSTAT1, CLRBUSFREE); SETPORT(SSTAT1, CLRPHASECHG); - CURRENT_SC->SCp.phase &= ~(selecting|not_issued); + CURRENT_SC->SCp.phase &= ~(selecting|not_issued); SETPORT(SCSISEQ, 0); if (TESTLO(SSTAT0, SELDO)) { - printk(ERR_LEAD "aha152x: passing bus free condition\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_ERR, CURRENT_SC, + "aha152x: passing bus free condition\n"); done(shpnt, DID_NO_CONNECT << 16); return; } SETPORT(SSTAT0, CLRSELDO); - + ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); if (CURRENT_SC->SCp.phase & aborting) { @@ -1693,7 +1543,7 @@ static void seldo_run(struct Scsi_Host *shpnt) } else if (CURRENT_SC->SCp.phase & resetting) { ADDMSGO(BUS_DEVICE_RESET); } else if (SYNCNEG==0 && SYNCHRONOUS) { - CURRENT_SC->SCp.phase |= syncneg; + CURRENT_SC->SCp.phase |= syncneg; MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8); SYNCNEG=1; /* negotiation in progress */ } @@ -1708,29 +1558,21 @@ static void seldo_run(struct Scsi_Host *shpnt) */ static void selto_run(struct Scsi_Host *shpnt) { - SETPORT(SCSISEQ, 0); + SETPORT(SCSISEQ, 0); SETPORT(SSTAT1, CLRSELTIMO); - DPRINTK(debug_selection, DEBUG_LEAD "selection timeout\n", CMDINFO(CURRENT_SC)); - - if(!CURRENT_SC) { - DPRINTK(debug_selection, DEBUG_LEAD "!CURRENT_SC\n", CMDINFO(CURRENT_SC)); + if (!CURRENT_SC) return; - } - CURRENT_SC->SCp.phase &= ~selecting; + CURRENT_SC->SCp.phase &= ~selecting; - if (CURRENT_SC->SCp.phase & aborted) { - DPRINTK(debug_selection, DEBUG_LEAD "aborted\n", CMDINFO(CURRENT_SC)); + if (CURRENT_SC->SCp.phase & aborted) done(shpnt, DID_ABORT << 16); - } else if (TESTLO(SSTAT0, SELINGO)) { - DPRINTK(debug_selection, DEBUG_LEAD "arbitration not won\n", CMDINFO(CURRENT_SC)); + else if (TESTLO(SSTAT0, SELINGO)) done(shpnt, DID_BUS_BUSY << 16); - } else { + else /* ARBITRATION won, but SELECTION failed */ - DPRINTK(debug_selection, DEBUG_LEAD "selection failed\n", CMDINFO(CURRENT_SC)); done(shpnt, DID_NO_CONNECT << 16); - } } /* @@ -1753,9 +1595,8 @@ static void seldi_run(struct Scsi_Host *shpnt) if(CURRENT_SC) { if(!(CURRENT_SC->SCp.phase & not_issued)) - printk(ERR_LEAD "command should not have been issued yet\n", CMDINFO(CURRENT_SC)); - - DPRINTK(debug_selection, ERR_LEAD "command requeued - reselection\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_ERR, 
CURRENT_SC, + "command should not have been issued yet\n"); DO_LOCK(flags); append_SC(&ISSUE_SC, CURRENT_SC); @@ -1764,17 +1605,16 @@ static void seldi_run(struct Scsi_Host *shpnt) CURRENT_SC = NULL; } - if(!DISCONNECTED_SC) { - DPRINTK(debug_selection, DEBUG_LEAD "unexpected SELDI ", CMDINFO(CURRENT_SC)); + if (!DISCONNECTED_SC) return; - } RECONN_TARGET=-1; selid = GETPORT(SELID) & ~(1 << shpnt->this_id); if (selid==0) { - printk("aha152x%d: target id unknown (%02x)\n", HOSTNO, selid); + shost_printk(KERN_INFO, shpnt, + "target id unknown (%02x)\n", selid); return; } @@ -1782,8 +1622,8 @@ static void seldi_run(struct Scsi_Host *shpnt) ; if(selid & ~(1 << target)) { - printk("aha152x%d: multiple targets reconnected (%02x)\n", - HOSTNO, selid); + shost_printk(KERN_INFO, shpnt, + "multiple targets reconnected (%02x)\n", selid); } @@ -1793,7 +1633,6 @@ static void seldi_run(struct Scsi_Host *shpnt) SETRATE(HOSTDATA(shpnt)->syncrate[target]); RECONN_TARGET=target; - DPRINTK(debug_selection, DEBUG_LEAD "target %d reselected (%02x).\n", CMDINFO(CURRENT_SC), target, selid); } /* @@ -1817,31 +1656,24 @@ static void msgi_run(struct Scsi_Host *shpnt) if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT)) return; - if(TESTLO(SSTAT0,SPIORDY)) { - DPRINTK(debug_msgi, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC)); + if (TESTLO(SSTAT0, SPIORDY)) return; - } ADDMSGI(GETPORT(SCSIDAT)); -#if defined(AHA152X_DEBUG) - if (HOSTDATA(shpnt)->debug & debug_msgi) { - printk(INFO_LEAD "inbound message %02x ", CMDINFO(CURRENT_SC), MSGI(0)); - spi_print_msg(&MSGI(0)); - printk("\n"); - } -#endif - if(!CURRENT_SC) { if(LASTSTATE!=seldi) { - printk(KERN_ERR "aha152x%d: message in w/o current command not after reselection\n", HOSTNO); + shost_printk(KERN_ERR, shpnt, + "message in w/o current command" + " not after reselection\n"); } /* - * Handle reselection - */ + * Handle reselection + */ if(!(MSGI(0) & IDENTIFY_BASE)) { - printk(KERN_ERR "aha152x%d: target didn't identify after reselection\n", HOSTNO); + shost_printk(KERN_ERR, shpnt, + "target didn't identify after reselection\n"); continue; } @@ -1849,12 +1681,13 @@ static void msgi_run(struct Scsi_Host *shpnt) if (!CURRENT_SC) { show_queues(shpnt); - printk(KERN_ERR "aha152x%d: no disconnected command for target %d/%d\n", HOSTNO, RECONN_TARGET, MSGI(0) & 0x3f); + shost_printk(KERN_ERR, shpnt, + "no disconnected command" + " for target %d/%d\n", + RECONN_TARGET, MSGI(0) & 0x3f); continue; } - DPRINTK(debug_msgi, DEBUG_LEAD "target reconnected\n", CMDINFO(CURRENT_SC)); - CURRENT_SC->SCp.Message = MSGI(0); CURRENT_SC->SCp.phase &= ~disconnected; @@ -1862,31 +1695,32 @@ static void msgi_run(struct Scsi_Host *shpnt) /* next message if any */ continue; - } + } CURRENT_SC->SCp.Message = MSGI(0); switch (MSGI(0)) { case DISCONNECT: if (!RECONNECT) - printk(WARN_LEAD "target was not allowed to disconnect\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_WARNING, CURRENT_SC, + "target was not allowed to disconnect\n"); CURRENT_SC->SCp.phase |= disconnected; break; case COMMAND_COMPLETE: - if(CURRENT_SC->SCp.phase & completed) - DPRINTK(debug_msgi, DEBUG_LEAD "again COMMAND COMPLETE\n", CMDINFO(CURRENT_SC)); - CURRENT_SC->SCp.phase |= completed; break; case MESSAGE_REJECT: if (SYNCNEG==1) { - printk(INFO_LEAD "Synchronous Data Transfer Request was rejected\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_INFO, CURRENT_SC, + "Synchronous Data Transfer Request" + " was rejected\n"); SYNCNEG=2; /* negotiation completed */ } else - printk(INFO_LEAD "inbound message (MESSAGE 
REJECT)\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_INFO, CURRENT_SC, + "inbound message (MESSAGE REJECT)\n"); break; case SAVE_POINTERS: @@ -1907,7 +1741,8 @@ static void msgi_run(struct Scsi_Host *shpnt) long ticks; if (MSGI(1) != 3) { - printk(ERR_LEAD "SDTR message length!=3\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_ERR, CURRENT_SC, + "SDTR message length!=3\n"); break; } @@ -1924,10 +1759,12 @@ static void msgi_run(struct Scsi_Host *shpnt) /* negotiation in progress */ if (ticks > 9 || MSGI(4) < 1 || MSGI(4) > 8) { ADDMSGO(MESSAGE_REJECT); - printk(INFO_LEAD "received Synchronous Data Transfer Request invalid - rejected\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_INFO, + CURRENT_SC, + "received Synchronous Data Transfer Request invalid - rejected\n"); break; } - + SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); } else if (ticks <= 9 && MSGI(4) >= 1) { ADDMSGO(EXTENDED_MESSAGE); @@ -1947,11 +1784,14 @@ static void msgi_run(struct Scsi_Host *shpnt) SYNCRATE |= ((ticks - 2) << 4) + MSGI(4); } else { /* requested SDTR is too slow, do it asynchronously */ - printk(INFO_LEAD "Synchronous Data Transfer Request too slow - Rejecting\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_INFO, + CURRENT_SC, + "Synchronous Data Transfer Request too slow - Rejecting\n"); ADDMSGO(MESSAGE_REJECT); } - SYNCNEG=2; /* negotiation completed */ + /* negotiation completed */ + SYNCNEG=2; SETRATE(SYNCRATE); } break; @@ -1985,12 +1825,12 @@ static void msgi_run(struct Scsi_Host *shpnt) static void msgi_end(struct Scsi_Host *shpnt) { if(MSGILEN>0) - printk(WARN_LEAD "target left before message completed (%d)\n", CMDINFO(CURRENT_SC), MSGILEN); + scmd_printk(KERN_WARNING, CURRENT_SC, + "target left before message completed (%d)\n", + MSGILEN); - if (MSGOLEN > 0 && !(GETPORT(SSTAT1) & BUSFREE)) { - DPRINTK(debug_msgi, DEBUG_LEAD "msgo pending\n", CMDINFO(CURRENT_SC)); + if (MSGOLEN > 0 && !(GETPORT(SSTAT1) & BUSFREE)) SETPORT(SCSISIG, P_MSGI | SIG_ATNO); - } } /* @@ -2003,21 +1843,12 @@ static void msgo_init(struct Scsi_Host *shpnt) if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) { ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun)); } else { - printk(INFO_LEAD "unexpected MESSAGE OUT phase; rejecting\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_INFO, CURRENT_SC, + "unexpected MESSAGE OUT phase; rejecting\n"); ADDMSGO(MESSAGE_REJECT); } } -#if defined(AHA152X_DEBUG) - if(HOSTDATA(shpnt)->debug & debug_msgo) { - int i; - - printk(DEBUG_LEAD "messages( ", CMDINFO(CURRENT_SC)); - for (i=0; i<MSGOLEN; i+=spi_print_msg(&MSGO(i)), printk(" ")) - ; - printk(")\n"); - } -#endif } /* @@ -2026,16 +1857,9 @@ static void msgo_init(struct Scsi_Host *shpnt) */ static void msgo_run(struct Scsi_Host *shpnt) { - if(MSGO_I==MSGOLEN) - DPRINTK(debug_msgo, DEBUG_LEAD "messages all sent (%d/%d)\n", CMDINFO(CURRENT_SC), MSGO_I, MSGOLEN); - while(MSGO_I<MSGOLEN) { - DPRINTK(debug_msgo, DEBUG_LEAD "message byte %02x (%d/%d)\n", CMDINFO(CURRENT_SC), MSGO(MSGO_I), MSGO_I, MSGOLEN); - - if(TESTLO(SSTAT0, SPIORDY)) { - DPRINTK(debug_msgo, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC)); + if (TESTLO(SSTAT0, SPIORDY)) return; - } if (MSGO_I==MSGOLEN-1) { /* Leave MESSAGE OUT after transfer */ @@ -2059,36 +1883,33 @@ static void msgo_run(struct Scsi_Host *shpnt) static void msgo_end(struct Scsi_Host *shpnt) { if(MSGO_I<MSGOLEN) { - printk(ERR_LEAD "message sent incompletely (%d/%d)\n", CMDINFO(CURRENT_SC), MSGO_I, MSGOLEN); + scmd_printk(KERN_ERR, CURRENT_SC, + "message sent incompletely (%d/%d)\n", + MSGO_I, MSGOLEN); 
if(SYNCNEG==1) { - printk(INFO_LEAD "Synchronous Data Transfer Request was rejected\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_INFO, CURRENT_SC, + "Synchronous Data Transfer Request was rejected\n"); SYNCNEG=2; } } - + MSGO_I = 0; MSGOLEN = 0; } -/* +/* * command phase * */ static void cmd_init(struct Scsi_Host *shpnt) { if (CURRENT_SC->SCp.sent_command) { - printk(ERR_LEAD "command already sent\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_ERR, CURRENT_SC, + "command already sent\n"); done(shpnt, DID_ERROR << 16); return; } -#if defined(AHA152X_DEBUG) - if (HOSTDATA(shpnt)->debug & debug_cmd) { - printk(DEBUG_LEAD "cmd_init: ", CMDINFO(CURRENT_SC)); - __scsi_print_command(CURRENT_SC->cmnd); - } -#endif - CMD_I=0; } @@ -2098,18 +1919,9 @@ static void cmd_init(struct Scsi_Host *shpnt) */ static void cmd_run(struct Scsi_Host *shpnt) { - if(CMD_I==CURRENT_SC->cmd_len) { - DPRINTK(debug_cmd, DEBUG_LEAD "command already completely sent (%d/%d)", CMDINFO(CURRENT_SC), CMD_I, CURRENT_SC->cmd_len); - disp_ports(shpnt); - } - while(CMD_I<CURRENT_SC->cmd_len) { - DPRINTK(debug_cmd, DEBUG_LEAD "command byte %02x (%d/%d)\n", CMDINFO(CURRENT_SC), CURRENT_SC->cmnd[CMD_I], CMD_I, CURRENT_SC->cmd_len); - - if(TESTLO(SSTAT0, SPIORDY)) { - DPRINTK(debug_cmd, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC)); + if (TESTLO(SSTAT0, SPIORDY)) return; - } SETPORT(SCSIDAT, CURRENT_SC->cmnd[CMD_I++]); } @@ -2118,7 +1930,9 @@ static void cmd_run(struct Scsi_Host *shpnt) static void cmd_end(struct Scsi_Host *shpnt) { if(CMD_I<CURRENT_SC->cmd_len) - printk(ERR_LEAD "command sent incompletely (%d/%d)\n", CMDINFO(CURRENT_SC), CMD_I, CURRENT_SC->cmd_len); + scmd_printk(KERN_ERR, CURRENT_SC, + "command sent incompletely (%d/%d)\n", + CMD_I, CURRENT_SC->cmd_len); else CURRENT_SC->SCp.sent_command++; } @@ -2129,20 +1943,11 @@ static void cmd_end(struct Scsi_Host *shpnt) */ static void status_run(struct Scsi_Host *shpnt) { - if(TESTLO(SSTAT0,SPIORDY)) { - DPRINTK(debug_status, DEBUG_LEAD "!SPIORDY\n", CMDINFO(CURRENT_SC)); + if (TESTLO(SSTAT0, SPIORDY)) return; - } CURRENT_SC->SCp.Status = GETPORT(SCSIDAT); -#if defined(AHA152X_DEBUG) - if (HOSTDATA(shpnt)->debug & debug_status) { - printk(DEBUG_LEAD "inbound status %02x ", CMDINFO(CURRENT_SC), CURRENT_SC->SCp.Status); - scsi_print_status(CURRENT_SC->SCp.Status); - printk("\n"); - } -#endif } /* @@ -2161,10 +1966,6 @@ static void datai_init(struct Scsi_Host *shpnt) SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE); DATA_LEN=0; - DPRINTK(debug_datai, - DEBUG_LEAD "datai_init: request_bufflen=%d resid=%d\n", - CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC), - scsi_get_resid(CURRENT_SC)); } static void datai_run(struct Scsi_Host *shpnt) @@ -2186,8 +1987,7 @@ static void datai_run(struct Scsi_Host *shpnt) barrier(); if(TESTLO(DMASTAT, DFIFOFULL|INTSTAT)) { - printk(ERR_LEAD "datai timeout", CMDINFO(CURRENT_SC)); - disp_ports(shpnt); + scmd_printk(KERN_ERR, CURRENT_SC, "datai timeout\n"); break; } @@ -2199,8 +1999,8 @@ static void datai_run(struct Scsi_Host *shpnt) barrier(); if(TESTLO(SSTAT2, SEMPTY)) { - printk(ERR_LEAD "datai sempty timeout", CMDINFO(CURRENT_SC)); - disp_ports(shpnt); + scmd_printk(KERN_ERR, CURRENT_SC, + "datai sempty timeout"); break; } @@ -2209,48 +2009,49 @@ static void datai_run(struct Scsi_Host *shpnt) if(CURRENT_SC->SCp.this_residual>0) { while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) { - data_count = fifodata>CURRENT_SC->SCp.this_residual ? + data_count = fifodata > CURRENT_SC->SCp.this_residual ? 
CURRENT_SC->SCp.this_residual : fifodata; fifodata -= data_count; - if(data_count & 1) { - DPRINTK(debug_datai, DEBUG_LEAD "8bit\n", CMDINFO(CURRENT_SC)); - SETPORT(DMACNTRL0, ENDMA|_8BIT); - *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT); - CURRENT_SC->SCp.this_residual--; - DATA_LEN++; - SETPORT(DMACNTRL0, ENDMA); - } - - if(data_count > 1) { - DPRINTK(debug_datai, DEBUG_LEAD "16bit(%d)\n", CMDINFO(CURRENT_SC), data_count); - data_count >>= 1; - insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count); - CURRENT_SC->SCp.ptr += 2 * data_count; - CURRENT_SC->SCp.this_residual -= 2 * data_count; - DATA_LEN += 2 * data_count; - } - - if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) { - /* advance to next buffer */ - CURRENT_SC->SCp.buffers_residual--; - CURRENT_SC->SCp.buffer++; - CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer); - CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length; - } - } - } else if(fifodata>0) { - printk(ERR_LEAD "no buffers left for %d(%d) bytes (data overrun!?)\n", CMDINFO(CURRENT_SC), fifodata, GETPORT(FIFOSTAT)); - SETPORT(DMACNTRL0, ENDMA|_8BIT); + if (data_count & 1) { + SETPORT(DMACNTRL0, ENDMA|_8BIT); + *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT); + CURRENT_SC->SCp.this_residual--; + DATA_LEN++; + SETPORT(DMACNTRL0, ENDMA); + } + + if (data_count > 1) { + data_count >>= 1; + insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count); + CURRENT_SC->SCp.ptr += 2 * data_count; + CURRENT_SC->SCp.this_residual -= 2 * data_count; + DATA_LEN += 2 * data_count; + } + + if (CURRENT_SC->SCp.this_residual == 0 && + CURRENT_SC->SCp.buffers_residual > 0) { + /* advance to next buffer */ + CURRENT_SC->SCp.buffers_residual--; + CURRENT_SC->SCp.buffer++; + CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer); + CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length; + } + } + } else if (fifodata > 0) { + scmd_printk(KERN_ERR, CURRENT_SC, + "no buffers left for %d(%d) bytes" + " (data overrun!?)\n", + fifodata, GETPORT(FIFOSTAT)); + SETPORT(DMACNTRL0, ENDMA|_8BIT); while(fifodata>0) { int data; data=GETPORT(DATAPORT); - DPRINTK(debug_datai, DEBUG_LEAD "data=%02x\n", CMDINFO(CURRENT_SC), data); fifodata--; DATA_LEN++; } - SETPORT(DMACNTRL0, ENDMA|_8BIT); + SETPORT(DMACNTRL0, ENDMA|_8BIT); } } @@ -2258,19 +2059,20 @@ static void datai_run(struct Scsi_Host *shpnt) TESTLO(DMASTAT, DFIFOEMP) || TESTLO(SSTAT2, SEMPTY) || GETPORT(FIFOSTAT)>0) { - /* + /* * something went wrong, if there's something left in the fifos * or the phase didn't change */ - printk(ERR_LEAD "fifos should be empty and phase should have changed\n", CMDINFO(CURRENT_SC)); - disp_ports(shpnt); + scmd_printk(KERN_ERR, CURRENT_SC, + "fifos should be empty and phase should have changed\n"); } if(DATA_LEN!=GETSTCNT()) { - printk(ERR_LEAD - "manual transfer count differs from automatic (count=%d;stcnt=%d;diff=%d;fifostat=%d)", - CMDINFO(CURRENT_SC), DATA_LEN, GETSTCNT(), GETSTCNT()-DATA_LEN, GETPORT(FIFOSTAT)); - disp_ports(shpnt); + scmd_printk(KERN_ERR, CURRENT_SC, + "manual transfer count differs from automatic " + "(count=%d;stcnt=%d;diff=%d;fifostat=%d)", + DATA_LEN, GETSTCNT(), GETSTCNT()-DATA_LEN, + GETPORT(FIFOSTAT)); mdelay(10000); } } @@ -2279,11 +2081,6 @@ static void datai_end(struct Scsi_Host *shpnt) { CMD_INC_RESID(CURRENT_SC, -GETSTCNT()); - DPRINTK(debug_datai, - DEBUG_LEAD "datai_end: request_bufflen=%d resid=%d stcnt=%d\n", - CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC), - scsi_get_resid(CURRENT_SC), GETSTCNT()); - SETPORT(SXFRCTL0, CH1|CLRSTCNT); SETPORT(DMACNTRL0, 0); 
} @@ -2304,11 +2101,6 @@ static void datao_init(struct Scsi_Host *shpnt) SETPORT(SIMODE1, ENSCSIPERR | ENSCSIRST | ENPHASEMIS | ENBUSFREE ); DATA_LEN = scsi_get_resid(CURRENT_SC); - - DPRINTK(debug_datao, - DEBUG_LEAD "datao_init: request_bufflen=%d; resid=%d\n", - CMDINFO(CURRENT_SC), scsi_bufflen(CURRENT_SC), - scsi_get_resid(CURRENT_SC)); } static void datao_run(struct Scsi_Host *shpnt) @@ -2323,8 +2115,9 @@ static void datao_run(struct Scsi_Host *shpnt) data_count=CURRENT_SC->SCp.this_residual; if(TESTLO(DMASTAT, DFIFOEMP)) { - printk(ERR_LEAD "datao fifo not empty (%d)", CMDINFO(CURRENT_SC), GETPORT(FIFOSTAT)); - disp_ports(shpnt); + scmd_printk(KERN_ERR, CURRENT_SC, + "datao fifo not empty (%d)", + GETPORT(FIFOSTAT)); break; } @@ -2342,7 +2135,7 @@ static void datao_run(struct Scsi_Host *shpnt) CURRENT_SC->SCp.ptr += 2 * data_count; CURRENT_SC->SCp.this_residual -= 2 * data_count; CMD_INC_RESID(CURRENT_SC, -2 * data_count); - } + } if(CURRENT_SC->SCp.this_residual==0 && CURRENT_SC->SCp.buffers_residual>0) { /* advance to next buffer */ @@ -2357,8 +2150,7 @@ static void datao_run(struct Scsi_Host *shpnt) barrier(); if(TESTLO(DMASTAT, DFIFOEMP|INTSTAT)) { - printk(ERR_LEAD "dataout timeout", CMDINFO(CURRENT_SC)); - disp_ports(shpnt); + scmd_printk(KERN_ERR, CURRENT_SC, "dataout timeout\n"); break; } } @@ -2368,35 +2160,23 @@ static void datao_end(struct Scsi_Host *shpnt) { if(TESTLO(DMASTAT, DFIFOEMP)) { int data_count = (DATA_LEN - scsi_get_resid(CURRENT_SC)) - - GETSTCNT(); - - DPRINTK(debug_datao, DEBUG_LEAD "datao: %d bytes to resend (%d written, %d transferred)\n", - CMDINFO(CURRENT_SC), - data_count, - DATA_LEN - scsi_get_resid(CURRENT_SC), - GETSTCNT()); + GETSTCNT(); CMD_INC_RESID(CURRENT_SC, data_count); data_count -= CURRENT_SC->SCp.ptr - - SG_ADDRESS(CURRENT_SC->SCp.buffer); + SG_ADDRESS(CURRENT_SC->SCp.buffer); while(data_count>0) { CURRENT_SC->SCp.buffer--; CURRENT_SC->SCp.buffers_residual++; data_count -= CURRENT_SC->SCp.buffer->length; } CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) - - data_count; + data_count; CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length + - data_count; + data_count; } - DPRINTK(debug_datao, DEBUG_LEAD "datao_end: request_bufflen=%d; resid=%d; stcnt=%d\n", - CMDINFO(CURRENT_SC), - scsi_bufflen(CURRENT_SC), - scsi_get_resid(CURRENT_SC), - GETSTCNT()); - SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT); SETPORT(SXFRCTL0, CH1); @@ -2420,7 +2200,7 @@ static int update_state(struct Scsi_Host *shpnt) STATE=rsti; SETPORT(SCSISEQ,0); SETPORT(SSTAT1,SCSIRSTI); - } else if(stat0 & SELDI && PREVSTATE==busfree) { + } else if (stat0 & SELDI && PREVSTATE == busfree) { STATE=seldi; } else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) { STATE=seldo; @@ -2445,8 +2225,7 @@ static int update_state(struct Scsi_Host *shpnt) } if((stat0 & SELDI) && STATE!=seldi && !dataphase) { - printk(INFO_LEAD "reselection missed?", CMDINFO(CURRENT_SC)); - disp_ports(shpnt); + scmd_printk(KERN_INFO, CURRENT_SC, "reselection missed?"); } if(STATE!=PREVSTATE) { @@ -2464,7 +2243,7 @@ static int update_state(struct Scsi_Host *shpnt) */ static void parerr_run(struct Scsi_Host *shpnt) { - printk(ERR_LEAD "parity error\n", CMDINFO(CURRENT_SC)); + scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n"); done(shpnt, DID_PARITY << 16); } @@ -2476,8 +2255,8 @@ static void rsti_run(struct Scsi_Host *shpnt) { Scsi_Cmnd *ptr; - printk(KERN_NOTICE "aha152x%d: scsi reset in\n", HOSTNO); - + shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n"); + 
ptr=DISCONNECTED_SC; while(ptr) { Scsi_Cmnd *next = SCNEXT(ptr); @@ -2539,8 +2318,6 @@ static void is_complete(struct Scsi_Host *shpnt) dataphase=update_state(shpnt); - DPRINTK(debug_phases, LEAD "start %s %s(%s)\n", CMDINFO(CURRENT_SC), states[STATE].name, states[PREVSTATE].name, states[LASTSTATE].name); - /* * end previous state * @@ -2567,9 +2344,9 @@ static void is_complete(struct Scsi_Host *shpnt) if(dataphase) { SETPORT(SSTAT0, REQINIT); SETPORT(SCSISIG, GETPORT(SCSISIG) & P_MASK); - SETPORT(SSTAT1, PHASECHG); + SETPORT(SSTAT1, PHASECHG); } - + /* * enable SPIO mode if previous didn't use it * and this one does @@ -2581,14 +2358,14 @@ static void is_complete(struct Scsi_Host *shpnt) if(CURRENT_SC) CURRENT_SC->SCp.phase |= spiordy; } - + /* * initialize for new state * */ if(PREVSTATE!=STATE && states[STATE].init) states[STATE].init(shpnt); - + /* * handle current state * @@ -2596,8 +2373,9 @@ static void is_complete(struct Scsi_Host *shpnt) if(states[STATE].run) states[STATE].run(shpnt); else - printk(ERR_LEAD "unexpected state (%x)\n", CMDINFO(CURRENT_SC), STATE); - + scmd_printk(KERN_ERR, CURRENT_SC, + "unexpected state (%x)\n", STATE); + /* * setup controller to interrupt on * the next expected condition and @@ -2613,7 +2391,6 @@ static void is_complete(struct Scsi_Host *shpnt) HOSTDATA(shpnt)->time[STATE] += jiffies-start; #endif - DPRINTK(debug_phases, LEAD "end %s %s(%s)\n", CMDINFO(CURRENT_SC), states[STATE].name, states[PREVSTATE].name, states[LASTSTATE].name); } while(pending); /* @@ -2626,289 +2403,42 @@ static void is_complete(struct Scsi_Host *shpnt) } -/* +/* * Dump the current driver status and panic */ static void aha152x_error(struct Scsi_Host *shpnt, char *msg) { - printk(KERN_EMERG "\naha152x%d: %s\n", HOSTNO, msg); + shost_printk(KERN_EMERG, shpnt, "%s\n", msg); show_queues(shpnt); panic("aha152x panic\n"); } /* - * Display registers of AIC-6260 - */ -static void disp_ports(struct Scsi_Host *shpnt) -{ -#if defined(AHA152X_DEBUG) - int s; - - printk("\n%s: %s(%s) ", - CURRENT_SC ? "busy" : "waiting", - states[STATE].name, - states[PREVSTATE].name); - - s = GETPORT(SCSISEQ); - printk("SCSISEQ( "); - if (s & TEMODEO) - printk("TARGET MODE "); - if (s & ENSELO) - printk("SELO "); - if (s & ENSELI) - printk("SELI "); - if (s & ENRESELI) - printk("RESELI "); - if (s & ENAUTOATNO) - printk("AUTOATNO "); - if (s & ENAUTOATNI) - printk("AUTOATNI "); - if (s & ENAUTOATNP) - printk("AUTOATNP "); - if (s & SCSIRSTO) - printk("SCSIRSTO "); - printk(");"); - - printk(" SCSISIG("); - s = GETPORT(SCSISIG); - switch (s & P_MASK) { - case P_DATAO: - printk("DATA OUT"); - break; - case P_DATAI: - printk("DATA IN"); - break; - case P_CMD: - printk("COMMAND"); - break; - case P_STATUS: - printk("STATUS"); - break; - case P_MSGO: - printk("MESSAGE OUT"); - break; - case P_MSGI: - printk("MESSAGE IN"); - break; - default: - printk("*invalid*"); - break; - } - - printk("); "); - - printk("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? 
"hi" : "lo"); - - printk("SSTAT( "); - s = GETPORT(SSTAT0); - if (s & TARGET) - printk("TARGET "); - if (s & SELDO) - printk("SELDO "); - if (s & SELDI) - printk("SELDI "); - if (s & SELINGO) - printk("SELINGO "); - if (s & SWRAP) - printk("SWRAP "); - if (s & SDONE) - printk("SDONE "); - if (s & SPIORDY) - printk("SPIORDY "); - if (s & DMADONE) - printk("DMADONE "); - - s = GETPORT(SSTAT1); - if (s & SELTO) - printk("SELTO "); - if (s & ATNTARG) - printk("ATNTARG "); - if (s & SCSIRSTI) - printk("SCSIRSTI "); - if (s & PHASEMIS) - printk("PHASEMIS "); - if (s & BUSFREE) - printk("BUSFREE "); - if (s & SCSIPERR) - printk("SCSIPERR "); - if (s & PHASECHG) - printk("PHASECHG "); - if (s & REQINIT) - printk("REQINIT "); - printk("); "); - - - printk("SSTAT( "); - - s = GETPORT(SSTAT0) & GETPORT(SIMODE0); - - if (s & TARGET) - printk("TARGET "); - if (s & SELDO) - printk("SELDO "); - if (s & SELDI) - printk("SELDI "); - if (s & SELINGO) - printk("SELINGO "); - if (s & SWRAP) - printk("SWRAP "); - if (s & SDONE) - printk("SDONE "); - if (s & SPIORDY) - printk("SPIORDY "); - if (s & DMADONE) - printk("DMADONE "); - - s = GETPORT(SSTAT1) & GETPORT(SIMODE1); - - if (s & SELTO) - printk("SELTO "); - if (s & ATNTARG) - printk("ATNTARG "); - if (s & SCSIRSTI) - printk("SCSIRSTI "); - if (s & PHASEMIS) - printk("PHASEMIS "); - if (s & BUSFREE) - printk("BUSFREE "); - if (s & SCSIPERR) - printk("SCSIPERR "); - if (s & PHASECHG) - printk("PHASECHG "); - if (s & REQINIT) - printk("REQINIT "); - printk("); "); - - printk("SXFRCTL0( "); - - s = GETPORT(SXFRCTL0); - if (s & SCSIEN) - printk("SCSIEN "); - if (s & DMAEN) - printk("DMAEN "); - if (s & CH1) - printk("CH1 "); - if (s & CLRSTCNT) - printk("CLRSTCNT "); - if (s & SPIOEN) - printk("SPIOEN "); - if (s & CLRCH1) - printk("CLRCH1 "); - printk("); "); - - printk("SIGNAL( "); - - s = GETPORT(SCSISIG); - if (s & SIG_ATNI) - printk("ATNI "); - if (s & SIG_SELI) - printk("SELI "); - if (s & SIG_BSYI) - printk("BSYI "); - if (s & SIG_REQI) - printk("REQI "); - if (s & SIG_ACKI) - printk("ACKI "); - printk("); "); - - printk("SELID (%02x), ", GETPORT(SELID)); - - printk("STCNT (%d), ", GETSTCNT()); - - printk("SSTAT2( "); - - s = GETPORT(SSTAT2); - if (s & SOFFSET) - printk("SOFFSET "); - if (s & SEMPTY) - printk("SEMPTY "); - if (s & SFULL) - printk("SFULL "); - printk("); SFCNT (%d); ", s & (SFULL | SFCNT)); - - s = GETPORT(SSTAT3); - printk("SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f); - - printk("SSTAT4( "); - s = GETPORT(SSTAT4); - if (s & SYNCERR) - printk("SYNCERR "); - if (s & FWERR) - printk("FWERR "); - if (s & FRERR) - printk("FRERR "); - printk("); "); - - printk("DMACNTRL0( "); - s = GETPORT(DMACNTRL0); - printk("%s ", s & _8BIT ? "8BIT" : "16BIT"); - printk("%s ", s & DMA ? "DMA" : "PIO"); - printk("%s ", s & WRITE_READ ? 
"WRITE" : "READ"); - if (s & ENDMA) - printk("ENDMA "); - if (s & INTEN) - printk("INTEN "); - if (s & RSTFIFO) - printk("RSTFIFO "); - if (s & SWINT) - printk("SWINT "); - printk("); "); - - printk("DMASTAT( "); - s = GETPORT(DMASTAT); - if (s & ATDONE) - printk("ATDONE "); - if (s & WORDRDY) - printk("WORDRDY "); - if (s & DFIFOFULL) - printk("DFIFOFULL "); - if (s & DFIFOEMP) - printk("DFIFOEMP "); - printk(")\n"); -#endif -} - -/* * display enabled interrupts */ static void disp_enintr(struct Scsi_Host *shpnt) { - int s; - - printk(KERN_DEBUG "enabled interrupts ( "); - - s = GETPORT(SIMODE0); - if (s & ENSELDO) - printk("ENSELDO "); - if (s & ENSELDI) - printk("ENSELDI "); - if (s & ENSELINGO) - printk("ENSELINGO "); - if (s & ENSWRAP) - printk("ENSWRAP "); - if (s & ENSDONE) - printk("ENSDONE "); - if (s & ENSPIORDY) - printk("ENSPIORDY "); - if (s & ENDMADONE) - printk("ENDMADONE "); - - s = GETPORT(SIMODE1); - if (s & ENSELTIMO) - printk("ENSELTIMO "); - if (s & ENATNTARG) - printk("ENATNTARG "); - if (s & ENPHASEMIS) - printk("ENPHASEMIS "); - if (s & ENBUSFREE) - printk("ENBUSFREE "); - if (s & ENSCSIPERR) - printk("ENSCSIPERR "); - if (s & ENPHASECHG) - printk("ENPHASECHG "); - if (s & ENREQINIT) - printk("ENREQINIT "); - printk(")\n"); + int s0, s1; + + s0 = GETPORT(SIMODE0); + s1 = GETPORT(SIMODE1); + + shost_printk(KERN_DEBUG, shpnt, + "enabled interrupts (%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", + (s0 & ENSELDO) ? "ENSELDO " : "", + (s0 & ENSELDI) ? "ENSELDI " : "", + (s0 & ENSELINGO) ? "ENSELINGO " : "", + (s0 & ENSWRAP) ? "ENSWRAP " : "", + (s0 & ENSDONE) ? "ENSDONE " : "", + (s0 & ENSPIORDY) ? "ENSPIORDY " : "", + (s0 & ENDMADONE) ? "ENDMADONE " : "", + (s1 & ENSELTIMO) ? "ENSELTIMO " : "", + (s1 & ENATNTARG) ? "ENATNTARG " : "", + (s1 & ENPHASEMIS) ? "ENPHASEMIS " : "", + (s1 & ENBUSFREE) ? "ENBUSFREE " : "", + (s1 & ENSCSIPERR) ? "ENSCSIPERR " : "", + (s1 & ENPHASECHG) ? "ENPHASECHG " : "", + (s1 & ENREQINIT) ? "ENREQINIT " : ""); } /* @@ -2916,36 +2446,21 @@ static void disp_enintr(struct Scsi_Host *shpnt) */ static void show_command(Scsi_Cmnd *ptr) { - scmd_printk(KERN_DEBUG, ptr, "%p: cmnd=(", ptr); - - __scsi_print_command(ptr->cmnd); - - printk(KERN_DEBUG "); request_bufflen=%d; resid=%d; phase |", - scsi_bufflen(ptr), scsi_get_resid(ptr)); - - if (ptr->SCp.phase & not_issued) - printk("not issued|"); - if (ptr->SCp.phase & selecting) - printk("selecting|"); - if (ptr->SCp.phase & identified) - printk("identified|"); - if (ptr->SCp.phase & disconnected) - printk("disconnected|"); - if (ptr->SCp.phase & completed) - printk("completed|"); - if (ptr->SCp.phase & spiordy) - printk("spiordy|"); - if (ptr->SCp.phase & syncneg) - printk("syncneg|"); - if (ptr->SCp.phase & aborted) - printk("aborted|"); - if (ptr->SCp.phase & resetted) - printk("resetted|"); - if( SCDATA(ptr) ) { - printk("; next=0x%p\n", SCNEXT(ptr)); - } else { - printk("; next=(host scribble NULL)\n"); - } + scsi_print_command(ptr); + scmd_printk(KERN_DEBUG, ptr, + "request_bufflen=%d; resid=%d; " + "phase |%s%s%s%s%s%s%s%s%s; next=0x%p", + scsi_bufflen(ptr), scsi_get_resid(ptr), + (ptr->SCp.phase & not_issued) ? "not issued|" : "", + (ptr->SCp.phase & selecting) ? "selecting|" : "", + (ptr->SCp.phase & identified) ? "identified|" : "", + (ptr->SCp.phase & disconnected) ? "disconnected|" : "", + (ptr->SCp.phase & completed) ? "completed|" : "", + (ptr->SCp.phase & spiordy) ? "spiordy|" : "", + (ptr->SCp.phase & syncneg) ? "syncneg|" : "", + (ptr->SCp.phase & aborted) ? 
"aborted|" : "", + (ptr->SCp.phase & resetted) ? "resetted|" : "", + (SCDATA(ptr)) ? SCNEXT(ptr) : NULL); } /* @@ -2972,7 +2487,6 @@ static void show_queues(struct Scsi_Host *shpnt) for (ptr = DISCONNECTED_SC; ptr; ptr = SCDATA(ptr) ? SCNEXT(ptr) : NULL) show_command(ptr); - disp_ports(shpnt); disp_enintr(shpnt); } @@ -3276,15 +2790,6 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0) return -EINVAL; -#if defined(AHA152X_DEBUG) - if(length>14 && strncmp("debug ", buffer+8, 6)==0) { - int debug = HOSTDATA(shpnt)->debug; - - HOSTDATA(shpnt)->debug = simple_strtoul(buffer+14, NULL, 0); - - printk(KERN_INFO "aha152x%d: debugging options set to 0x%04x (were 0x%04x)\n", HOSTNO, HOSTDATA(shpnt)->debug, debug); - } else -#endif #if defined(AHA152X_STAT) if(length>13 && strncmp("reset", buffer+8, 5)==0) { int i; @@ -3302,7 +2807,7 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) HOSTDATA(shpnt)->time[i]=0; } - printk(KERN_INFO "aha152x%d: stats reseted.\n", HOSTNO); + shost_printk(KERN_INFO, shpnt, "aha152x: stats reset.\n"); } else #endif @@ -3343,29 +2848,6 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50, HOSTDATA(shpnt)->syncrate[i] & 0x0f); } -#if defined(AHA152X_DEBUG) -#define PDEBUG(flags,txt) \ - if(HOSTDATA(shpnt)->debug & flags) SPRINTF("(%s) ", txt); - - SPRINTF("enabled debugging options: "); - - PDEBUG(debug_procinfo, "procinfo"); - PDEBUG(debug_queue, "queue"); - PDEBUG(debug_intr, "interrupt"); - PDEBUG(debug_selection, "selection"); - PDEBUG(debug_msgo, "message out"); - PDEBUG(debug_msgi, "message in"); - PDEBUG(debug_status, "status"); - PDEBUG(debug_cmd, "command"); - PDEBUG(debug_datai, "data in"); - PDEBUG(debug_datao, "data out"); - PDEBUG(debug_eh, "eh"); - PDEBUG(debug_locking, "locks"); - PDEBUG(debug_phases, "phases"); - - SPRINTF("\n"); -#endif - SPRINTF("\nqueue status:\n"); DO_LOCK(flags); if (ISSUE_SC) { @@ -3393,8 +2875,8 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) #if defined(AHA152X_STAT) SPRINTF("statistics:\n" - "total commands: %d\n" - "disconnections: %d\n" + "total commands: %d\n" + "disconnections: %d\n" "busfree with check condition: %d\n" "busfree without old command: %d\n" "busfree without new command: %d\n" @@ -3413,7 +2895,7 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) HOSTDATA(shpnt)->busfree_without_any_action); for(i=0; i<maxstate; i++) { SPRINTF("%-10s %-12d %-12d %-12ld\n", - states[i].name, + states[i].name, HOSTDATA(shpnt)->count_trans[i], HOSTDATA(shpnt)->count[i], HOSTDATA(shpnt)->time[i]); @@ -3671,25 +3153,19 @@ static int __init aha152x_init(void) setup[setup_count].synchronous = aha152x[5]; setup[setup_count].delay = aha152x[6]; setup[setup_count].ext_trans = aha152x[7]; -#if defined(AHA152X_DEBUG) - setup[setup_count].debug = aha152x[8]; -#endif - } else if(io[0]!=0 || irq[0]!=0) { + } else if (io[0] != 0 || irq[0] != 0) { if(io[0]!=0) setup[setup_count].io_port = io[0]; if(irq[0]!=0) setup[setup_count].irq = irq[0]; - setup[setup_count].scsiid = scsiid[0]; - setup[setup_count].reconnect = reconnect[0]; - setup[setup_count].parity = parity[0]; - setup[setup_count].synchronous = sync[0]; - setup[setup_count].delay = delay[0]; - setup[setup_count].ext_trans = exttrans[0]; -#if defined(AHA152X_DEBUG) - setup[setup_count].debug = debug[0]; -#endif + 
setup[setup_count].scsiid = scsiid[0]; + setup[setup_count].reconnect = reconnect[0]; + setup[setup_count].parity = parity[0]; + setup[setup_count].synchronous = sync[0]; + setup[setup_count].delay = delay[0]; + setup[setup_count].ext_trans = exttrans[0]; } - if (checksetup(&setup[setup_count])) + if (checksetup(&setup[setup_count])) setup_count++; else printk(KERN_ERR "aha152x: invalid module params io=0x%x, irq=%d,scsiid=%d,reconnect=%d,parity=%d,sync=%d,delay=%d,exttrans=%d\n", @@ -3714,22 +3190,16 @@ static int __init aha152x_init(void) setup[setup_count].synchronous = aha152x1[5]; setup[setup_count].delay = aha152x1[6]; setup[setup_count].ext_trans = aha152x1[7]; -#if defined(AHA152X_DEBUG) - setup[setup_count].debug = aha152x1[8]; -#endif - } else if(io[1]!=0 || irq[1]!=0) { + } else if (io[1] != 0 || irq[1] != 0) { if(io[1]!=0) setup[setup_count].io_port = io[1]; if(irq[1]!=0) setup[setup_count].irq = irq[1]; - setup[setup_count].scsiid = scsiid[1]; - setup[setup_count].reconnect = reconnect[1]; - setup[setup_count].parity = parity[1]; - setup[setup_count].synchronous = sync[1]; - setup[setup_count].delay = delay[1]; - setup[setup_count].ext_trans = exttrans[1]; -#if defined(AHA152X_DEBUG) - setup[setup_count].debug = debug[1]; -#endif + setup[setup_count].scsiid = scsiid[1]; + setup[setup_count].reconnect = reconnect[1]; + setup[setup_count].parity = parity[1]; + setup[setup_count].synchronous = sync[1]; + setup[setup_count].delay = delay[1]; + setup[setup_count].ext_trans = exttrans[1]; } if (checksetup(&setup[setup_count])) setup_count++; @@ -3776,9 +3246,6 @@ static int __init aha152x_init(void) setup[setup_count].synchronous = 1; setup[setup_count].delay = DELAY_DEFAULT; setup[setup_count].ext_trans = 0; -#if defined(AHA152X_DEBUG) - setup[setup_count].debug = DEBUG_DEFAULT; -#endif #if defined(__ISAPNP__) pnpdev[setup_count] = dev; #endif @@ -3847,9 +3314,6 @@ static int __init aha152x_init(void) setup[setup_count].synchronous = conf.cf_syncneg; setup[setup_count].delay = DELAY_DEFAULT; setup[setup_count].ext_trans = 0; -#if defined(AHA152X_DEBUG) - setup[setup_count].debug = DEBUG_DEFAULT; -#endif setup_count++; } @@ -3903,11 +3367,8 @@ module_exit(aha152x_exit); #if !defined(MODULE) static int __init aha152x_setup(char *str) { -#if defined(AHA152X_DEBUG) - int ints[11]; -#else int ints[10]; -#endif + get_options(str, ARRAY_SIZE(ints), ints); if(setup_count>=ARRAY_SIZE(setup)) { @@ -3924,16 +3385,9 @@ static int __init aha152x_setup(char *str) setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1; setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT; setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0; -#if defined(AHA152X_DEBUG) - setup[setup_count].debug = ints[0] >= 9 ? ints[9] : DEBUG_DEFAULT; - if (ints[0] > 9) { - printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>" - "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>[,<DEBUG>]]]]]]]]\n"); -#else if (ints[0] > 8) { /*}*/ printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>" "[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n"); -#endif } else { setup_count++; return 0; diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index 5f3101797c93..31ace4bef8fe 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c @@ -531,7 +531,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy) * quiet as possible... 
*/ - return 0; + return SUCCESS; } static struct scsi_host_template aha1740_template = { diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index ed333669a7dc..d5c7b193d8d3 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -925,6 +925,7 @@ struct scsi_host_template aic79xx_driver_template = { .slave_configure = ahd_linux_slave_configure, .target_alloc = ahd_linux_target_alloc, .target_destroy = ahd_linux_target_destroy, + .use_blk_tags = 1, }; /******************************** Bus DMA *************************************/ @@ -1468,12 +1469,9 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev, switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) { case AHD_DEV_Q_BASIC: - scsi_set_tag_type(sdev, MSG_SIMPLE_TASK); - scsi_activate_tcq(sdev, dev->openings + dev->active); - break; case AHD_DEV_Q_TAGGED: - scsi_set_tag_type(sdev, MSG_ORDERED_TASK); - scsi_activate_tcq(sdev, dev->openings + dev->active); + scsi_change_queue_depth(sdev, + dev->openings + dev->active); break; default: /* @@ -1482,7 +1480,7 @@ ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev, * serially on the controller/device. This should * remove some latency. */ - scsi_deactivate_tcq(sdev, 1); + scsi_change_queue_depth(sdev, 1); break; } } @@ -1619,15 +1617,6 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev, } if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) { - int msg_bytes; - uint8_t tag_msgs[2]; - - msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); - if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { - hscb->control |= tag_msgs[0]; - if (tag_msgs[0] == MSG_ORDERED_TASK) - dev->commands_since_idle_or_otag = 0; - } else if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH && (dev->flags & AHD_DEV_Q_TAGGED) != 0) { hscb->control |= MSG_ORDERED_TASK; diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index d2c9bf39033d..88360116dbcb 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -812,6 +812,7 @@ struct scsi_host_template aic7xxx_driver_template = { .slave_configure = ahc_linux_slave_configure, .target_alloc = ahc_linux_target_alloc, .target_destroy = ahc_linux_target_destroy, + .use_blk_tags = 1, }; /**************************** Tasklet Handler *********************************/ @@ -1334,13 +1335,9 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev, } switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) { case AHC_DEV_Q_BASIC: - scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); - scsi_activate_tcq(sdev, dev->openings + dev->active); - break; case AHC_DEV_Q_TAGGED: - scsi_set_tag_type(sdev, MSG_ORDERED_TAG); - scsi_activate_tcq(sdev, dev->openings + dev->active); - break; + scsi_change_queue_depth(sdev, + dev->openings + dev->active); default: /* * We allow the OS to queue 2 untagged transactions to @@ -1348,7 +1345,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev, * serially on the controller/device. This should * remove some latency. */ - scsi_deactivate_tcq(sdev, 2); + scsi_change_queue_depth(sdev, 2); break; } } @@ -1447,7 +1444,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, * we are storing a full busy target *lun* * table in SCB space. 
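The ahd_platform_set_tags()/ahc_platform_set_tags() hunks above (and the arcmsr change further down) replace the old scsi_set_tag_type()/scsi_activate_tcq()/scsi_deactivate_tcq() calls with a single scsi_change_queue_depth() call. A minimal sketch of the resulting pattern for a LLDD queue-depth hook follows; the function name is hypothetical and not taken from this patch:

#include <scsi/scsi_device.h>

/*
 * Tagged devices get the full window the driver can support; untagged
 * devices are effectively serialized. scsi_change_queue_depth() returns
 * the depth that was actually set.
 */
static int example_adjust_queue_depth(struct scsi_device *sdev, int depth)
{
	if (!sdev->tagged_supported)
		depth = 1;
	return scsi_change_queue_depth(sdev, depth);
}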
*/ - if (!blk_rq_tagged(cmd->request) + if (!(cmd->flags & SCMD_TAGGED) && (ahc->features & AHC_SCB_BTT) == 0) { int target_offset; @@ -1501,15 +1498,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev, } if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { - int msg_bytes; - uint8_t tag_msgs[2]; - - msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); - if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { - hscb->control |= tag_msgs[0]; - if (tag_msgs[0] == MSG_ORDERED_TASK) - dev->commands_since_idle_or_otag = 0; - } else if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH + if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH && (dev->flags & AHC_DEV_Q_TAGGED) != 0) { hscb->control |= MSG_ORDERED_TASK; dev->commands_since_idle_or_otag = 0; diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h index 66cda669b417..26d4ad9ede2e 100644 --- a/drivers/scsi/aic94xx/aic94xx.h +++ b/drivers/scsi/aic94xx/aic94xx.h @@ -78,7 +78,7 @@ void asd_dev_gone(struct domain_device *dev); void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id); -int asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags); +int asd_execute_task(struct sas_task *task, gfp_t gfp_flags); void asd_set_dmamode(struct domain_device *dev); diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 4df867e07b20..9f636a34d595 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -1200,8 +1200,7 @@ static void asd_start_scb_timers(struct list_head *list) * Case A: we can send the whole batch at once. Increment "pending" * in the beginning of this function, when it is checked, in order to * eliminate races when this function is called by multiple processes. - * Case B: should never happen if the managing layer considers - * lldd_queue_size. + * Case B: should never happen. */ int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, int num) diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index c56741fc4b99..14fc018436c2 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -49,14 +49,6 @@ MODULE_PARM_DESC(use_msi, "\n" "\tEnable(1) or disable(0) using PCI MSI.\n" "\tDefault: 0"); -static int lldd_max_execute_num = 0; -module_param_named(collector, lldd_max_execute_num, int, S_IRUGO); -MODULE_PARM_DESC(collector, "\n" - "\tIf greater than one, tells the SAS Layer to run in Task Collector\n" - "\tMode. 
If 1 or 0, tells the SAS Layer to run in Direct Mode.\n" - "\tThe aic94xx SAS LLDD supports both modes.\n" - "\tDefault: 0 (Direct Mode).\n"); - static struct scsi_transport_template *aic94xx_transport_template; static int asd_scan_finished(struct Scsi_Host *, unsigned long); static void asd_scan_start(struct Scsi_Host *); @@ -83,6 +75,8 @@ static struct scsi_host_template aic94xx_sht = { .eh_bus_reset_handler = sas_eh_bus_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static int asd_map_memio(struct asd_ha_struct *asd_ha) @@ -709,9 +703,6 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha) asd_ha->sas_ha.sas_port= sas_ports; asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; - asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue; - asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num; - return sas_register_ha(&asd_ha->sas_ha); } diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 59b86e260ce9..5ff1ce7ba1f4 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -543,8 +543,7 @@ static int asd_can_queue(struct asd_ha_struct *asd_ha, int num) return res; } -int asd_execute_task(struct sas_task *task, const int num, - gfp_t gfp_flags) +int asd_execute_task(struct sas_task *task, gfp_t gfp_flags) { int res = 0; LIST_HEAD(alist); @@ -553,11 +552,11 @@ int asd_execute_task(struct sas_task *task, const int num, struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; unsigned long flags; - res = asd_can_queue(asd_ha, num); + res = asd_can_queue(asd_ha, 1); if (res) return res; - res = num; + res = 1; ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); if (res) { res = -ENOMEM; @@ -568,7 +567,7 @@ int asd_execute_task(struct sas_task *task, const int num, list_for_each_entry(a, &alist, list) { a->uldd_task = t; t->lldd_task = a; - t = list_entry(t->list.next, struct sas_task, list); + break; } list_for_each_entry(a, &alist, list) { t = a->uldd_task; @@ -601,7 +600,7 @@ int asd_execute_task(struct sas_task *task, const int num, } list_del_init(&alist); - res = asd_post_ascb_list(asd_ha, ascb, num); + res = asd_post_ascb_list(asd_ha, ascb, 1); if (unlikely(res)) { a = NULL; __list_add(&alist, ascb->list.prev, &ascb->list); @@ -639,6 +638,6 @@ out_err_unmap: out_err: if (ascb) asd_ascb_free_list(ascb); - asd_can_dequeue(asd_ha, num); + asd_can_dequeue(asd_ha, 1); return res; } diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c new file mode 100644 index 000000000000..aa3e2c7cd83c --- /dev/null +++ b/drivers/scsi/am53c974.c @@ -0,0 +1,586 @@ +/* + * AMD am53c974 driver. + * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/interrupt.h> + +#include <scsi/scsi_host.h> + +#include "esp_scsi.h" + +#define DRV_MODULE_NAME "am53c974" +#define DRV_MODULE_VERSION "1.00" + +static bool am53c974_debug; +static bool am53c974_fenab = true; + +#define esp_dma_log(f, a...) 
\ + do { \ + if (am53c974_debug) \ + shost_printk(KERN_DEBUG, esp->host, f, ##a); \ + } while (0) + +#define ESP_DMA_CMD 0x10 +#define ESP_DMA_STC 0x11 +#define ESP_DMA_SPA 0x12 +#define ESP_DMA_WBC 0x13 +#define ESP_DMA_WAC 0x14 +#define ESP_DMA_STATUS 0x15 +#define ESP_DMA_SMDLA 0x16 +#define ESP_DMA_WMAC 0x17 + +#define ESP_DMA_CMD_IDLE 0x00 +#define ESP_DMA_CMD_BLAST 0x01 +#define ESP_DMA_CMD_ABORT 0x02 +#define ESP_DMA_CMD_START 0x03 +#define ESP_DMA_CMD_MASK 0x03 +#define ESP_DMA_CMD_DIAG 0x04 +#define ESP_DMA_CMD_MDL 0x10 +#define ESP_DMA_CMD_INTE_P 0x20 +#define ESP_DMA_CMD_INTE_D 0x40 +#define ESP_DMA_CMD_DIR 0x80 + +#define ESP_DMA_STAT_PWDN 0x01 +#define ESP_DMA_STAT_ERROR 0x02 +#define ESP_DMA_STAT_ABORT 0x04 +#define ESP_DMA_STAT_DONE 0x08 +#define ESP_DMA_STAT_SCSIINT 0x10 +#define ESP_DMA_STAT_BCMPLT 0x20 + +/* EEPROM is accessed with 16-bit values */ +#define DC390_EEPROM_READ 0x80 +#define DC390_EEPROM_LEN 0x40 + +/* + * DC390 EEPROM + * + * 8 * 4 bytes of per-device options + * followed by HBA specific options + */ + +/* Per-device options */ +#define DC390_EE_MODE1 0x00 +#define DC390_EE_SPEED 0x01 + +/* HBA-specific options */ +#define DC390_EE_ADAPT_SCSI_ID 0x40 +#define DC390_EE_MODE2 0x41 +#define DC390_EE_DELAY 0x42 +#define DC390_EE_TAG_CMD_NUM 0x43 + +#define DC390_EE_MODE1_PARITY_CHK 0x01 +#define DC390_EE_MODE1_SYNC_NEGO 0x02 +#define DC390_EE_MODE1_EN_DISC 0x04 +#define DC390_EE_MODE1_SEND_START 0x08 +#define DC390_EE_MODE1_TCQ 0x10 + +#define DC390_EE_MODE2_MORE_2DRV 0x01 +#define DC390_EE_MODE2_GREATER_1G 0x02 +#define DC390_EE_MODE2_RST_SCSI_BUS 0x04 +#define DC390_EE_MODE2_ACTIVE_NEGATION 0x08 +#define DC390_EE_MODE2_NO_SEEK 0x10 +#define DC390_EE_MODE2_LUN_CHECK 0x20 + +struct pci_esp_priv { + struct esp *esp; + u8 dma_status; +}; + +static void pci_esp_dma_drain(struct esp *esp); + +static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp) +{ + struct pci_dev *pdev = esp->dev; + + return pci_get_drvdata(pdev); +} + +static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg) +{ + iowrite8(val, esp->regs + (reg * 4UL)); +} + +static u8 pci_esp_read8(struct esp *esp, unsigned long reg) +{ + return ioread8(esp->regs + (reg * 4UL)); +} + +static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg) +{ + return iowrite32(val, esp->regs + (reg * 4UL)); +} + +static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf, + size_t sz, int dir) +{ + return pci_map_single(esp->dev, buf, sz, dir); +} + +static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg, + int num_sg, int dir) +{ + return pci_map_sg(esp->dev, sg, num_sg, dir); +} + +static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr, + size_t sz, int dir) +{ + pci_unmap_single(esp->dev, addr, sz, dir); +} + +static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, + int num_sg, int dir) +{ + pci_unmap_sg(esp->dev, sg, num_sg, dir); +} + +static int pci_esp_irq_pending(struct esp *esp) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + + pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS); + esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status); + + if (pep->dma_status & (ESP_DMA_STAT_ERROR | + ESP_DMA_STAT_ABORT | + ESP_DMA_STAT_DONE | + ESP_DMA_STAT_SCSIINT)) + return 1; + + return 0; +} + +static void pci_esp_reset_dma(struct esp *esp) +{ + /* Nothing to do ? 
*/ +} + +static void pci_esp_dma_drain(struct esp *esp) +{ + u8 resid; + int lim = 1000; + + + if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP || + (esp->sreg & ESP_STAT_PMASK) == ESP_DIP) + /* Data-In or Data-Out, nothing to be done */ + return; + + while (--lim > 0) { + resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES; + if (resid <= 1) + break; + cpu_relax(); + } + if (resid > 1) { + /* FIFO not cleared */ + shost_printk(KERN_INFO, esp->host, + "FIFO not cleared, %d bytes left\n", + resid); + } + + /* + * When there is a residual BCMPLT will never be set + * (obviously). But we still have to issue the BLAST + * command, otherwise the data will not being transferred. + * But we'll never know when the BLAST operation is + * finished. So check for some time and give up eventually. + */ + lim = 1000; + pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD); + while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) { + if (--lim == 0) + break; + cpu_relax(); + } + pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD); + esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid); + /* BLAST residual handling is currently untested */ + if (WARN_ON_ONCE(resid == 1)) { + struct esp_cmd_entry *ent = esp->active_cmd; + + ent->flags |= ESP_CMD_FLAG_RESIDUAL; + } +} + +static void pci_esp_dma_invalidate(struct esp *esp) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + + esp_dma_log("invalidate DMA\n"); + + pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD); + pep->dma_status = 0; +} + +static int pci_esp_dma_error(struct esp *esp) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + + if (pep->dma_status & ESP_DMA_STAT_ERROR) { + u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD); + + if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START) + pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD); + + return 1; + } + if (pep->dma_status & ESP_DMA_STAT_ABORT) { + pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD); + pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD); + return 1; + } + return 0; +} + +static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, + u32 dma_count, int write, u8 cmd) +{ + struct pci_esp_priv *pep = pci_esp_get_priv(esp); + u32 val = 0; + + BUG_ON(!(cmd & ESP_CMD_DMA)); + + pep->dma_status = 0; + + /* Set DMA engine to IDLE */ + if (write) + /* DMA write direction logic is inverted */ + val |= ESP_DMA_CMD_DIR; + pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD); + + pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); + pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); + if (esp->config2 & ESP_CONFIG2_FENAB) + pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI); + + pci_esp_write32(esp, esp_count, ESP_DMA_STC); + pci_esp_write32(esp, addr, ESP_DMA_SPA); + + esp_dma_log("start dma addr[%x] count[%d:%d]\n", + addr, esp_count, dma_count); + + scsi_esp_cmd(esp, cmd); + /* Send DMA Start command */ + pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD); +} + +static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) +{ + int dma_limit = 16; + u32 base, end; + + /* + * If CONFIG2_FENAB is set we can + * handle up to 24 bit addresses + */ + if (esp->config2 & ESP_CONFIG2_FENAB) + dma_limit = 24; + + if (dma_len > (1U << dma_limit)) + dma_len = (1U << dma_limit); + + /* + * Prevent crossing a 24-bit address boundary. 
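 * For example (illustrative numbers, not from the original patch): with
 * dma_addr 0x00fff000 and dma_len 0x2000, base below is 0xfff000 and
 * base + dma_len is 0x1001000, which exceeds 1U << 24, so end is clamped
 * to 0x1000000 and dma_len becomes 0x1000; the chunk ends exactly at the
 * 16 MB boundary.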
+ */ + base = dma_addr & ((1U << 24) - 1U); + end = base + dma_len; + if (end > (1U << 24)) + end = (1U <<24); + dma_len = end - base; + + return dma_len; +} + +static const struct esp_driver_ops pci_esp_ops = { + .esp_write8 = pci_esp_write8, + .esp_read8 = pci_esp_read8, + .map_single = pci_esp_map_single, + .map_sg = pci_esp_map_sg, + .unmap_single = pci_esp_unmap_single, + .unmap_sg = pci_esp_unmap_sg, + .irq_pending = pci_esp_irq_pending, + .reset_dma = pci_esp_reset_dma, + .dma_drain = pci_esp_dma_drain, + .dma_invalidate = pci_esp_dma_invalidate, + .send_dma_cmd = pci_esp_send_dma_cmd, + .dma_error = pci_esp_dma_error, + .dma_length_limit = pci_esp_dma_length_limit, +}; + +/* + * Read DC-390 eeprom + */ +static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd) +{ + u8 carry_flag = 1, j = 0x80, bval; + int i; + + for (i = 0; i < 9; i++) { + if (carry_flag) { + pci_write_config_byte(pdev, 0x80, 0x40); + bval = 0xc0; + } else + bval = 0x80; + + udelay(160); + pci_write_config_byte(pdev, 0x80, bval); + udelay(160); + pci_write_config_byte(pdev, 0x80, 0); + udelay(160); + + carry_flag = (cmd & j) ? 1 : 0; + j >>= 1; + } +} + +static u16 dc390_eeprom_get_data(struct pci_dev *pdev) +{ + int i; + u16 wval = 0; + u8 bval; + + for (i = 0; i < 16; i++) { + wval <<= 1; + + pci_write_config_byte(pdev, 0x80, 0x80); + udelay(160); + pci_write_config_byte(pdev, 0x80, 0x40); + udelay(160); + pci_read_config_byte(pdev, 0x00, &bval); + + if (bval == 0x22) + wval |= 1; + } + + return wval; +} + +static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr) +{ + u8 cmd = DC390_EEPROM_READ, i; + + for (i = 0; i < DC390_EEPROM_LEN; i++) { + pci_write_config_byte(pdev, 0xc0, 0); + udelay(160); + + dc390_eeprom_prepare_read(pdev, cmd++); + *ptr++ = dc390_eeprom_get_data(pdev); + + pci_write_config_byte(pdev, 0x80, 0); + pci_write_config_byte(pdev, 0x80, 0); + udelay(160); + } +} + +static void dc390_check_eeprom(struct esp *esp) +{ + u8 EEbuf[128]; + u16 *ptr = (u16 *)EEbuf, wval = 0; + int i; + + dc390_read_eeprom((struct pci_dev *)esp->dev, ptr); + + for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++) + wval += *ptr; + + /* no Tekram EEprom found */ + if (wval != 0x1234) { + struct pci_dev *pdev = esp->dev; + dev_printk(KERN_INFO, &pdev->dev, + "No valid Tekram EEprom found\n"); + return; + } + esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID]; + esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM]; + if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION) + esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE; +} + +static int pci_esp_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct scsi_host_template *hostt = &scsi_esp_template; + int err = -ENODEV; + struct Scsi_Host *shost; + struct esp *esp; + struct pci_esp_priv *pep; + + if (pci_enable_device(pdev)) { + dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n"); + return -ENODEV; + } + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + dev_printk(KERN_INFO, &pdev->dev, + "failed to set 32bit DMA mask\n"); + goto fail_disable_device; + } + + shost = scsi_host_alloc(hostt, sizeof(struct esp)); + if (!shost) { + dev_printk(KERN_INFO, &pdev->dev, + "failed to allocate scsi host\n"); + err = -ENOMEM; + goto fail_disable_device; + } + + pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL); + if (!pep) { + dev_printk(KERN_INFO, &pdev->dev, + "failed to allocate esp_priv\n"); + err = -ENOMEM; + goto fail_host_alloc; + } + + esp = shost_priv(shost); + esp->host = shost; + esp->dev = pdev; + esp->ops = &pci_esp_ops; + /* + * 
The am53c974 HBA has a design flaw of generating + * spurious DMA completion interrupts when using + * DMA for command submission. + */ + esp->flags |= ESP_FLAG_USE_FIFO; + /* + * Enable CONFIG2_FENAB to allow for large DMA transfers + */ + if (am53c974_fenab) + esp->config2 |= ESP_CONFIG2_FENAB; + + pep->esp = esp; + + if (pci_request_regions(pdev, DRV_MODULE_NAME)) { + dev_printk(KERN_ERR, &pdev->dev, + "pci memory selection failed\n"); + goto fail_priv_alloc; + } + + esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); + if (!esp->regs) { + dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n"); + err = -EINVAL; + goto fail_release_regions; + } + esp->dma_regs = esp->regs; + + pci_set_master(pdev); + + esp->command_block = pci_alloc_consistent(pdev, 16, + &esp->command_block_dma); + if (!esp->command_block) { + dev_printk(KERN_ERR, &pdev->dev, + "failed to allocate command block\n"); + err = -ENOMEM; + goto fail_unmap_regs; + } + + err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED, + DRV_MODULE_NAME, esp); + if (err < 0) { + dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n"); + goto fail_unmap_command_block; + } + + esp->scsi_id = 7; + dc390_check_eeprom(esp); + + shost->this_id = esp->scsi_id; + shost->max_id = 8; + shost->irq = pdev->irq; + shost->io_port = pci_resource_start(pdev, 0); + shost->n_io_port = pci_resource_len(pdev, 0); + shost->unique_id = shost->io_port; + esp->scsi_id_mask = (1 << esp->scsi_id); + /* Assume 40MHz clock */ + esp->cfreq = 40000000; + + pci_set_drvdata(pdev, pep); + + err = scsi_esp_register(esp, &pdev->dev); + if (err) + goto fail_free_irq; + + return 0; + +fail_free_irq: + free_irq(pdev->irq, esp); +fail_unmap_command_block: + pci_free_consistent(pdev, 16, esp->command_block, + esp->command_block_dma); +fail_unmap_regs: + pci_iounmap(pdev, esp->regs); +fail_release_regions: + pci_release_regions(pdev); +fail_priv_alloc: + kfree(pep); +fail_host_alloc: + scsi_host_put(shost); +fail_disable_device: + pci_disable_device(pdev); + + return err; +} + +static void pci_esp_remove_one(struct pci_dev *pdev) +{ + struct pci_esp_priv *pep = pci_get_drvdata(pdev); + struct esp *esp = pep->esp; + + scsi_esp_unregister(esp); + free_irq(pdev->irq, esp); + pci_free_consistent(pdev, 16, esp->command_block, + esp->command_block_dma); + pci_iounmap(pdev, esp->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(pep); + + scsi_host_put(esp->host); +} + +static struct pci_device_id am53c974_pci_tbl[] = { + { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } +}; +MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl); + +static struct pci_driver am53c974_driver = { + .name = DRV_MODULE_NAME, + .id_table = am53c974_pci_tbl, + .probe = pci_esp_probe_one, + .remove = pci_esp_remove_one, +}; + +static int __init am53c974_module_init(void) +{ + return pci_register_driver(&am53c974_driver); +} + +static void __exit am53c974_module_exit(void) +{ + pci_unregister_driver(&am53c974_driver); +} + +MODULE_DESCRIPTION("AM53C974 SCSI driver"); +MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_ALIAS("tmscsim"); + +module_param(am53c974_debug, bool, 0644); +MODULE_PARM_DESC(am53c974_debug, "Enable debugging"); + +module_param(am53c974_fenab, bool, 0444); +MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes"); + +module_init(am53c974_module_init); +module_exit(am53c974_module_exit); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c 
b/drivers/scsi/arcmsr/arcmsr_hba.c index 0b44fb5ee485..914c39f9f388 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -114,16 +114,11 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *); -static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, - int queue_depth, int reason) +static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) { - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (queue_depth > ARCMSR_MAX_CMD_PERLUN) queue_depth = ARCMSR_MAX_CMD_PERLUN; - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); - return queue_depth; + return scsi_change_queue_depth(sdev, queue_depth); } static struct scsi_host_template arcmsr_scsi_host_template = { diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index d89b9b4deb3c..deaaf84989cd 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -850,13 +850,13 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp, break; default: - printk(KERN_ERR "scsi%d.H: incomplete data transfer detected: result=%08X command=", - host->host->host_no, SCpnt->result); - __scsi_print_command(SCpnt->cmnd); + scmd_printk(KERN_ERR, SCpnt, + "incomplete data transfer detected: " + "result=%08X", SCpnt->result); + scsi_print_command(SCpnt); acornscsi_dumpdma(host, "done"); - acornscsi_dumplog(host, SCpnt->device->id); - SCpnt->result &= 0xffff; - SCpnt->result |= DID_ERROR << 16; + acornscsi_dumplog(host, SCpnt->device->id); + set_host_byte(SCpnt, DID_ERROR); } } } diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index 8ef810a4476e..d28d6c0f18c0 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -13,16 +13,12 @@ #include <asm/ecard.h> #include <asm/io.h> -#include "../scsi.h" #include <scsi/scsi_host.h> #include <scsi/scsicam.h> -#define AUTOSENSE #define PSEUDO_DMA -#define CUMANASCSI_PUBLIC_RELEASE 1 - #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) #define NCR5380_local_declare() struct Scsi_Host *_instance #define NCR5380_setup(instance) _instance = instance @@ -30,6 +26,7 @@ #define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value) #define NCR5380_intr cumanascsi_intr #define NCR5380_queue_command cumanascsi_queue_command +#define NCR5380_info cumanascsi_info #define NCR5380_implementation_fields \ unsigned ctrl; \ @@ -42,11 +39,6 @@ void cumanascsi_setup(char *str, int *ints) { } -const char *cumanascsi_info(struct Scsi_Host *spnt) -{ - return ""; -} - #define CTRL 0x16fc #define STAT 0x2004 #define L(v) (((v)<<16)|((v) & 0x0000ffff)) @@ -267,14 +259,6 @@ static int cumanascsi1_probe(struct expansion_card *ec, goto out_unmap; } - printk("scsi%d: at port 0x%08lx irq %d", - host->host_no, host->io_port, host->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE); - printk("\nscsi%d:", host->host_no); - NCR5380_print_options(host); - printk("\n"); - ret = scsi_add_host(host, &ec->dev); if (ret) goto out_free_irq; diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 71cfb1e504c4..e64c3af7c1a0 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -308,8 +308,7 @@ static void 
fas216_log_command(FAS216_Info *info, int level, fas216_do_log(info, '0' + SCpnt->device->id, fmt, args); va_end(args); - printk(" CDB: "); - __scsi_print_command(SCpnt->cmnd); + scsi_print_command(SCpnt); } static void @@ -2079,14 +2078,12 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result) break; default: - printk(KERN_ERR "scsi%d.%c: incomplete data transfer " - "detected: res=%08X ptr=%p len=%X CDB: ", - info->host->host_no, '0' + SCpnt->device->id, - SCpnt->result, info->scsi.SCp.ptr, - info->scsi.SCp.this_residual); - __scsi_print_command(SCpnt->cmnd); - SCpnt->result &= ~(255 << 16); - SCpnt->result |= DID_BAD_TARGET << 16; + scmd_printk(KERN_ERR, SCpnt, + "incomplete data transfer detected: res=%08X ptr=%p len=%X\n", + SCpnt->result, info->scsi.SCp.ptr, + info->scsi.SCp.this_residual); + scsi_print_command(SCpnt); + set_host_byte(SCpnt, DID_ERROR); goto request_sense; } } @@ -2158,12 +2155,11 @@ static void fas216_done(FAS216_Info *info, unsigned int result) * to transfer, we should not have a valid pointer. */ if (info->scsi.SCp.ptr && info->scsi.SCp.this_residual == 0) { - printk("scsi%d.%c: zero bytes left to transfer, but " - "buffer pointer still valid: ptr=%p len=%08x CDB: ", - info->host->host_no, '0' + SCpnt->device->id, - info->scsi.SCp.ptr, info->scsi.SCp.this_residual); + scmd_printk(KERN_INFO, SCpnt, + "zero bytes left to transfer, but buffer pointer still valid: ptr=%p len=%08x\n", + info->scsi.SCp.ptr, info->scsi.SCp.this_residual); info->scsi.SCp.ptr = NULL; - __scsi_print_command(SCpnt->cmnd); + scsi_print_command(SCpnt); } /* @@ -2427,14 +2423,11 @@ int fas216_eh_abort(struct scsi_cmnd *SCpnt) info->stats.aborts += 1; - printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no); - __scsi_print_command(SCpnt->cmnd); + scmd_printk(KERN_WARNING, SCpnt, "abort command\n"); print_debug_list(); fas216_dumpstate(info); - printk(KERN_WARNING "scsi%d: abort %p ", info->host->host_no, SCpnt); - switch (fas216_find_command(info, SCpnt)) { /* * We found the command, and cleared it out. Either @@ -2442,7 +2435,7 @@ int fas216_eh_abort(struct scsi_cmnd *SCpnt) * target, or the busylun bit is not set. */ case res_success: - printk("success\n"); + scmd_printk(KERN_WARNING, SCpnt, "abort %p success\n", SCpnt); result = SUCCESS; break; @@ -2452,14 +2445,13 @@ int fas216_eh_abort(struct scsi_cmnd *SCpnt) * if the bus is free. */ case res_hw_abort: - /* * We are unable to abort the command for some reason. */ default: case res_failed: - printk("failed\n"); + scmd_printk(KERN_WARNING, SCpnt, "abort %p failed\n", SCpnt); break; } @@ -2664,8 +2656,7 @@ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt) fas216_checkmagic(info); - printk("scsi%d.%c: %s: resetting host\n", - info->host->host_no, '0' + SCpnt->device->id, __func__); + fas216_log(info, LOG_ERROR, "resetting host"); /* * Reset the SCSI chip. 
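The acornscsi and fas216 hunks above drop the hand-rolled "scsi%d.%c:" prefixes and __scsi_print_command(SCpnt->cmnd) calls in favor of the midlayer helpers scmd_printk() and scsi_print_command(), and set the host byte via set_host_byte() rather than open-coded masking of SCpnt->result. A minimal sketch of that error-reporting pattern, with a hypothetical function name not taken from this patch:

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>

static void example_report_bad_transfer(struct scsi_cmnd *cmd)
{
	/* scmd_printk() prefixes the message with the host/id/lun of cmd */
	scmd_printk(KERN_ERR, cmd, "incomplete data transfer: result=%08X\n",
		    cmd->result);
	/* pretty-print the CDB of the failed command */
	scsi_print_command(cmd);
	/* report the failure to the midlayer as a host-side error */
	set_host_byte(cmd, DID_ERROR);
}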
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 188e734c7ff0..7c6fa1479c9c 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -14,13 +14,9 @@ #include <asm/ecard.h> #include <asm/io.h> -#include "../scsi.h" #include <scsi/scsi_host.h> -#define AUTOSENSE /*#define PSEUDO_DMA*/ - -#define OAKSCSI_PUBLIC_RELEASE 1 #define DONT_USE_INTR #define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata) @@ -29,10 +25,9 @@ #define NCR5380_read(reg) readb(_base + ((reg) << 2)) #define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2)) -#define NCR5380_intr oakscsi_intr #define NCR5380_queue_command oakscsi_queue_command +#define NCR5380_info oakscsi_info #define NCR5380_show_info oakscsi_show_info -#define NCR5380_write_info oakscsi_write_info #define NCR5380_implementation_fields \ void __iomem *base @@ -42,11 +37,6 @@ #undef START_DMA_INITIATOR_RECEIVE_REG #define START_DMA_INITIATOR_RECEIVE_REG (128 + 7) -const char * oakscsi_info (struct Scsi_Host *spnt) -{ - return ""; -} - #define STAT ((128 + 16) << 2) #define DATA ((128 + 8) << 2) @@ -114,7 +104,6 @@ printk("reading %p len %d\n", addr, len); static struct scsi_host_template oakscsi_template = { .module = THIS_MODULE, .show_info = oakscsi_show_info, - .write_info = oakscsi_write_info, .name = "Oak 16-bit SCSI", .info = oakscsi_info, .queuecommand = oakscsi_queue_command, @@ -150,19 +139,11 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id) goto unreg; } - host->irq = IRQ_NONE; + host->irq = NO_IRQ; host->n_io_port = 255; NCR5380_init(host, 0); - printk("scsi%d: at port 0x%08lx irqs disabled", - host->host_no, host->io_port); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - host->can_queue, host->cmd_per_lun, OAKSCSI_PUBLIC_RELEASE); - printk("\nscsi%d:", host->host_no); - NCR5380_print_options(host); - printk("\n"); - ret = scsi_add_host(host, &ec->dev); if (ret) goto out_unmap; diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 79e6f045c2a9..6daed6b386d4 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -11,8 +11,6 @@ * drew@colorado.edu * +1 (303) 666-5836 * - * DISTRIBUTION RELEASE 6. - * * For more information, please consult * * NCR 5380 Family @@ -73,6 +71,9 @@ * 1. Test linked command handling code after Eric is ready with * the high level code. */ + +/* Adapted for the sun3 by Sam Creasey. */ + #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> @@ -103,27 +104,7 @@ /* * Design - * Issues : - * - * The other Linux SCSI drivers were written when Linux was Intel PC-only, - * and specifically for each board rather than each chip. This makes their - * adaptation to platforms like the Mac (Some of which use NCR5380's) - * more difficult than it has to be. * - * Also, many of the SCSI drivers were written before the command queuing - * routines were implemented, meaning their implementations of queued - * commands were hacked on rather than designed in from the start. - * - * When I designed the Linux SCSI drivers I figured that - * while having two different SCSI boards in a system might be useful - * for debugging things, two of the same type wouldn't be used. - * Well, I was wrong and a number of users have mailed me about running - * multiple high-performance SCSI boards in a server. - * - * Finally, when I get questions from users, I have no idea what - * revision of my driver they are running. 
- * - * This driver attempts to address these problems : * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use @@ -138,17 +119,6 @@ * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * - * To solve the multiple-boards-in-the-same-system problem, - * there is a separate instance structure for each instance - * of a 5380 in the system. So, multiple NCR5380 drivers will - * be able to coexist with appropriate changes to the high level - * SCSI code. - * - * A NCR5380_PUBLIC_REVISION macro is provided, with the release - * number (updated for each public release) printed by the - * NCR5380_print_options command, which should be called from the - * wrapper detect function, so that I know what release of the driver - * users are using. * * Issues specific to the NCR5380 : * @@ -173,19 +143,17 @@ * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, - * which is started when not running by the interrupt handler, - * timer, and queue command function. It attempts to establish - * I_T_L or I_T_L_Q nexuses by removing the commands from the - * issue queue and calling NCR5380_select() if a nexus - * is not established. + * which is started from a workqueue for each NCR5380 host in the + * system. It attempts to establish I_T_L or I_T_L_Q nexuses by + * removing the commands from the issue queue and calling + * NCR5380_select() if a nexus is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected - * queue, and NCR5380_main tries to find more work. If USLEEP - * was defined, and the target is idle for too long, the system - * will try to sleep. + * queue, and NCR5380_main tries to find more work. If the target is + * idle for too long, the system will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect @@ -211,6 +179,9 @@ * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * + * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential + * transceivers. + * * LINKED - if defined, linked commands are supported. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. @@ -223,6 +194,9 @@ * * NCR5380_write(register, value) - write to the specific register * + * NCR5380_implementation_fields - additional fields needed for this + * specific implementation of the NCR5380 + * * Either real DMA *or* pseudo DMA may be implemented * REAL functions : * NCR5380_REAL_DMA should be defined if real DMA is to be used. @@ -241,40 +215,21 @@ * NCR5380_pwrite(instance, src, count) * NCR5380_pread(instance, dst, count); * - * If nothing specific to this implementation needs doing (ie, with external - * hardware), you must also define - * - * NCR5380_queue_command - * NCR5380_reset - * NCR5380_abort - * NCR5380_proc_info - * - * to be the global entry points into the specific driver, ie - * #define NCR5380_queue_command t128_queue_command. 
- * - * If this is not done, the routines will be defined as static functions - * with the NCR5380* names and the user must provide a globally - * accessible wrapper function. - * * The generic driver is initialized by calling NCR5380_init(instance), * after setting the appropriate host specific fields and ID. If the * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, - * possible) function may be used. Before the specific driver initialization - * code finishes, NCR5380_print_options should be called. + * possible) function may be used. */ -static struct Scsi_Host *first_instance = NULL; -static struct scsi_host_template *the_template = NULL; - /* Macros ease life... :-) */ #define SETUP_HOSTDATA(in) \ struct NCR5380_hostdata *hostdata = \ (struct NCR5380_hostdata *)(in)->hostdata #define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) -#define NEXT(cmd) ((Scsi_Cmnd *)(cmd)->host_scribble) +#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble) #define SET_NEXT(cmd,next) ((cmd)->host_scribble = (void *)(next)) -#define NEXTADDR(cmd) ((Scsi_Cmnd **)&(cmd)->host_scribble) +#define NEXTADDR(cmd) ((struct scsi_cmnd **)&(cmd)->host_scribble) #define HOSTNO instance->host_no #define H_NO(cmd) (cmd)->device->host->host_no @@ -316,30 +271,17 @@ static struct scsi_host_template *the_template = NULL; * important: the tag bit must be cleared before 'nr_allocated' is decreased. */ -/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */ -#undef TAG_NONE -#define TAG_NONE 0xff - -typedef struct { - DECLARE_BITMAP(allocated, MAX_TAGS); - int nr_allocated; - int queue_size; -} TAG_ALLOC; - -static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */ - - -static void __init init_tags(void) +static void __init init_tags(struct NCR5380_hostdata *hostdata) { int target, lun; - TAG_ALLOC *ta; + struct tag_alloc *ta; - if (!setup_use_tagged_queuing) + if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) return; for (target = 0; target < 8; ++target) { for (lun = 0; lun < 8; ++lun) { - ta = &TagAlloc[target][lun]; + ta = &hostdata->TagAlloc[target][lun]; bitmap_zero(ta->allocated, MAX_TAGS); ta->nr_allocated = 0; /* At the beginning, assume the maximum queue size we could @@ -359,7 +301,7 @@ static void __init init_tags(void) * conditions. */ -static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) +static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) { u8 lun = cmd->device->lun; SETUP_HOSTDATA(cmd->device->host); @@ -367,10 +309,11 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) if (hostdata->busy[cmd->device->id] & (1 << lun)) return 1; if (!should_be_tagged || - !setup_use_tagged_queuing || !cmd->device->tagged_supported) + !(hostdata->flags & FLAG_TAGGED_QUEUING) || + !cmd->device->tagged_supported) return 0; - if (TagAlloc[cmd->device->id][lun].nr_allocated >= - TagAlloc[cmd->device->id][lun].queue_size) { + if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >= + hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) { dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", H_NO(cmd), cmd->device->id, lun); return 1; @@ -384,7 +327,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) * untagged. */ -static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) +static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) { u8 lun = cmd->device->lun; SETUP_HOSTDATA(cmd->device->host); @@ -393,13 +336,14 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) * an untagged command. 
*/ if (!should_be_tagged || - !setup_use_tagged_queuing || !cmd->device->tagged_supported) { + !(hostdata->flags & FLAG_TAGGED_QUEUING) || + !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << lun); dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " "command\n", H_NO(cmd), cmd->device->id, lun); } else { - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; + struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); set_bit(cmd->tag, ta->allocated); @@ -416,7 +360,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) * unlock the LUN. */ -static void cmd_free_tag(Scsi_Cmnd *cmd) +static void cmd_free_tag(struct scsi_cmnd *cmd) { u8 lun = cmd->device->lun; SETUP_HOSTDATA(cmd->device->host); @@ -429,7 +373,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", H_NO(cmd), cmd->tag); } else { - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; + struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun]; clear_bit(cmd->tag, ta->allocated); ta->nr_allocated--; dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", @@ -438,17 +382,17 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) } -static void free_all_tags(void) +static void free_all_tags(struct NCR5380_hostdata *hostdata) { int target, lun; - TAG_ALLOC *ta; + struct tag_alloc *ta; - if (!setup_use_tagged_queuing) + if (!(hostdata->flags & FLAG_TAGGED_QUEUING)) return; for (target = 0; target < 8; ++target) { for (lun = 0; lun < 8; ++lun) { - ta = &TagAlloc[target][lun]; + ta = &hostdata->TagAlloc[target][lun]; bitmap_zero(ta->allocated, MAX_TAGS); ta->nr_allocated = 0; } @@ -459,19 +403,20 @@ static void free_all_tags(void) /* - * Function: void merge_contiguous_buffers( Scsi_Cmnd *cmd ) + * Function: void merge_contiguous_buffers( struct scsi_cmnd *cmd ) * * Purpose: Try to merge several scatter-gather requests into one DMA * transfer. This is possible if the scatter buffers lie on * physical contiguous addresses. * - * Parameters: Scsi_Cmnd *cmd + * Parameters: struct scsi_cmnd *cmd * The command to work on. The first scatter buffer's data are * assumed to be already transferred into ptr/this_residual. */ -static void merge_contiguous_buffers(Scsi_Cmnd *cmd) +static void merge_contiguous_buffers(struct scsi_cmnd *cmd) { +#if !defined(CONFIG_SUN3) unsigned long endaddr; #if (NDEBUG & NDEBUG_MERGING) unsigned long oldlen = cmd->SCp.this_residual; @@ -496,18 +441,17 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", cnt, cmd->SCp.ptr, cmd->SCp.this_residual); #endif +#endif /* !defined(CONFIG_SUN3) */ } -/* - * Function : void initialize_SCp(Scsi_Cmnd *cmd) +/** + * initialize_SCp - init the scsi pointer field + * @cmd: command block to set up * - * Purpose : initialize the saved data pointers for cmd to point to the - * start of the buffer. - * - * Inputs : cmd - Scsi_Cmnd structure to have pointers reset. + * Set up the internal fields in the SCSI command. 
*/ -static inline void initialize_SCp(Scsi_Cmnd *cmd) +static inline void initialize_SCp(struct scsi_cmnd *cmd) { /* * Initialize the Scsi Pointer field so that all of the commands in the @@ -557,12 +501,11 @@ static struct { {0, NULL} }; -/* - * Function : void NCR5380_print(struct Scsi_Host *instance) +/** + * NCR5380_print - print scsi bus signals + * @instance: adapter state to dump * - * Purpose : print the SCSI bus signals for debugging purposes - * - * Input : instance - which NCR5380 + * Print the SCSI bus signals for debugging purposes */ static void NCR5380_print(struct Scsi_Host *instance) @@ -605,12 +548,13 @@ static struct { {PHASE_UNKNOWN, "UNKNOWN"} }; -/* - * Function : void NCR5380_print_phase(struct Scsi_Host *instance) +/** + * NCR5380_print_phase - show SCSI phase + * @instance: adapter to dump * - * Purpose : print the current SCSI phase for debugging purposes + * Print the current SCSI phase for debugging purposes * - * Input : instance - which NCR5380 + * Locks: none */ static void NCR5380_print_phase(struct Scsi_Host *instance) @@ -648,71 +592,75 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) #include <linux/workqueue.h> #include <linux/interrupt.h> -static volatile int main_running; -static DECLARE_WORK(NCR5380_tqueue, NCR5380_main); - -static inline void queue_main(void) +static inline void queue_main(struct NCR5380_hostdata *hostdata) { - if (!main_running) { + if (!hostdata->main_running) { /* If in interrupt and NCR5380_main() not already running, queue it on the 'immediate' task queue, to be processed immediately after the current interrupt processing has finished. */ - schedule_work(&NCR5380_tqueue); + schedule_work(&hostdata->main_task); } /* else: nothing to do: the running NCR5380_main() will pick up any newly queued command. */ } - -static inline void NCR5380_all_init(void) -{ - static int done = 0; - if (!done) { - dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); - done = 1; - } -} - - -/* - * Function : void NCR58380_print_options (struct Scsi_Host *instance) +/** + * NCR58380_info - report driver and host information + * @instance: relevant scsi host instance * - * Purpose : called by probe code indicating the NCR5380 driver - * options that were selected. + * For use as the host template info() handler. * - * Inputs : instance, pointer to this instance. Unused. + * Locks: none */ -static void __init NCR5380_print_options(struct Scsi_Host *instance) +static const char *NCR5380_info(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + return hostdata->info; +} + +static void prepare_info(struct Scsi_Host *instance) { - printk(" generic options" -#ifdef AUTOSENSE - " AUTOSENSE" + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + snprintf(hostdata->info, sizeof(hostdata->info), + "%s, io_port 0x%lx, n_io_port %d, " + "base 0x%lx, irq %d, " + "can_queue %d, cmd_per_lun %d, " + "sg_tablesize %d, this_id %d, " + "flags { %s}, " + "options { %s} ", + instance->hostt->name, instance->io_port, instance->n_io_port, + instance->base, instance->irq, + instance->can_queue, instance->cmd_per_lun, + instance->sg_tablesize, instance->this_id, + hostdata->flags & FLAG_TAGGED_QUEUING ? 
"TAGGED_QUEUING " : "", +#ifdef DIFFERENTIAL + "DIFFERENTIAL " #endif #ifdef REAL_DMA - " REAL DMA" + "REAL_DMA " #endif #ifdef PARITY - " PARITY" + "PARITY " #endif #ifdef SUPPORT_TAGS - " SCSI-2 TAGGED QUEUING" + "SUPPORT_TAGS " #endif - ); - printk(" generic release=%d", NCR5380_PUBLIC_RELEASE); + ""); } -/* - * Function : void NCR5380_print_status (struct Scsi_Host *instance) +/** + * NCR5380_print_status - dump controller info + * @instance: controller to dump * - * Purpose : print commands in the various queues, called from - * NCR5380_abort and NCR5380_debug to aid debugging. - * - * Inputs : instance, pointer to this instance. + * Print commands in the various queues, called from NCR5380_abort + * to aid debugging. */ -static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) +static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd) { int i, s; unsigned char *command; @@ -729,7 +677,7 @@ static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) static void NCR5380_print_status(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata; - Scsi_Cmnd *ptr; + struct scsi_cmnd *ptr; unsigned long flags; NCR5380_dprint(NDEBUG_ANY, instance); @@ -737,20 +685,19 @@ static void NCR5380_print_status(struct Scsi_Host *instance) hostdata = (struct NCR5380_hostdata *)instance->hostdata; - printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); local_irq_save(flags); printk("NCR5380: coroutine is%s running.\n", - main_running ? "" : "n't"); + hostdata->main_running ? "" : "n't"); if (!hostdata->connected) printk("scsi%d: no currently connected command\n", HOSTNO); else - lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected); + lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected); printk("scsi%d: issue_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) + for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) lprint_Scsi_Cmnd(ptr); printk("scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; + for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = NEXT(ptr)) lprint_Scsi_Cmnd(ptr); @@ -758,7 +705,7 @@ static void NCR5380_print_status(struct Scsi_Host *instance) printk("\n"); } -static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) +static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) { int i, s; unsigned char *command; @@ -772,28 +719,28 @@ static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) seq_printf(m, "\n"); } -static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) +static int __maybe_unused NCR5380_show_info(struct seq_file *m, + struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata; - Scsi_Cmnd *ptr; + struct scsi_cmnd *ptr; unsigned long flags; hostdata = (struct NCR5380_hostdata *)instance->hostdata; - seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); local_irq_save(flags); seq_printf(m, "NCR5380: coroutine is%s running.\n", - main_running ? "" : "n't"); + hostdata->main_running ? 
"" : "n't"); if (!hostdata->connected) seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); else - show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); + show_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m); seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) + for (ptr = (struct scsi_cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) show_Scsi_Cmnd(ptr, m); seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; + for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = NEXT(ptr)) show_Scsi_Cmnd(ptr, m); @@ -801,16 +748,18 @@ static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) return 0; } -/* - * Function : void NCR5380_init (struct Scsi_Host *instance) +/** + * NCR5380_init - initialise an NCR5380 + * @instance: adapter to configure + * @flags: control flags * - * Purpose : initializes *instance and corresponding 5380 chip. - * - * Inputs : instance - instantiation of the 5380 driver. + * Initializes *instance and corresponding 5380 chip, + * with flags OR'd into the initial flags value. * * Notes : I assume that the host, hostno, and id bits have been - * set correctly. I don't care about the irq and other fields. + * set correctly. I don't care about the irq and other fields. * + * Returns 0 for success */ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) @@ -818,8 +767,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) int i; SETUP_HOSTDATA(instance); - NCR5380_all_init(); - + hostdata->host = instance; hostdata->aborted = 0; hostdata->id_mask = 1 << instance->this_id; hostdata->id_higher_mask = 0; @@ -829,7 +777,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; #ifdef SUPPORT_TAGS - init_tags(); + init_tags(hostdata); #endif #if defined (REAL_DMA) hostdata->dma_len = 0; @@ -838,19 +786,11 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) hostdata->connected = NULL; hostdata->issue_queue = NULL; hostdata->disconnected_queue = NULL; - hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; + hostdata->flags = flags; - if (!the_template) { - the_template = instance->hostt; - first_instance = instance; - } + INIT_WORK(&hostdata->main_task, NCR5380_main); -#ifndef AUTOSENSE - if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1)) - printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n" - " without AUTOSENSE option, contingent allegiance conditions may\n" - " be incorrectly cleared.\n", HOSTNO); -#endif /* def AUTOSENSE */ + prepare_info(instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(MODE_REG, MR_BASE); @@ -860,33 +800,35 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) return 0; } +/** + * NCR5380_exit - remove an NCR5380 + * @instance: adapter to remove + * + * Assumes that no more work can be queued (e.g. by NCR5380_intr). + */ + static void NCR5380_exit(struct Scsi_Host *instance) { - /* Empty, as we didn't schedule any delayed work */ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + cancel_work_sync(&hostdata->main_task); } -/* - * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, - * void (*done)(Scsi_Cmnd *)) - * - * Purpose : enqueues a SCSI command - * - * Inputs : cmd - SCSI command, done - function called on completion, with - * a pointer to the command descriptor. 
- * - * Returns : 0 - * - * Side effects : - * cmd is added to the per instance issue_queue, with minor - * twiddling done to the host specific fields of cmd. If the - * main coroutine is not running, it is restarted. +/** + * NCR5380_queue_command - queue a command + * @instance: the relevant SCSI adapter + * @cmd: SCSI command * + * cmd is added to the per instance issue_queue, with minor + * twiddling done to the host specific fields of cmd. If the + * main coroutine is not running, it is restarted. */ -static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +static int NCR5380_queue_command(struct Scsi_Host *instance, + struct scsi_cmnd *cmd) { - SETUP_HOSTDATA(cmd->device->host); - Scsi_Cmnd *tmp; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + struct scsi_cmnd *tmp; unsigned long flags; #if (NDEBUG & NDEBUG_NO_WRITE) @@ -896,47 +838,17 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", H_NO(cmd)); cmd->result = (DID_ERROR << 16); - done(cmd); + cmd->scsi_done(cmd); return 0; } #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ -#ifdef NCR5380_STATS -# if 0 - if (!hostdata->connected && !hostdata->issue_queue && - !hostdata->disconnected_queue) { - hostdata->timebase = jiffies; - } -# endif -# ifdef NCR5380_STAT_LIMIT - if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT) -# endif - switch (cmd->cmnd[0]) { - case WRITE: - case WRITE_6: - case WRITE_10: - hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); - hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd); - hostdata->pendingw++; - break; - case READ: - case READ_6: - case READ_10: - hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); - hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd); - hostdata->pendingr++; - break; - } -#endif - /* * We use the host_scribble field as a pointer to the next command * in a queue */ SET_NEXT(cmd, NULL); - cmd->scsi_done = done; - cmd->result = 0; /* @@ -946,7 +858,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) * sense data is only guaranteed to be valid while the condition exists. */ - local_irq_save(flags); /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. * Otherwise a running NCR5380_main may steal the lock. * Lock before actually inserting due to fairness reasons explained in @@ -959,17 +870,24 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) * because also a timer int can trigger an abort or reset, which would * alter queues and touch the lock. */ - if (!IS_A_TT()) { - /* perhaps stop command timer here */ - falcon_get_lock(); - /* perhaps restart command timer here */ - } + if (!NCR5380_acquire_dma_irq(instance)) + return SCSI_MLQUEUE_HOST_BUSY; + + local_irq_save(flags); + + /* + * Insert the cmd into the issue queue. Note that REQUEST SENSE + * commands are added to the head of the queue since any command will + * clear the contingent allegiance condition that exists and the + * sense data is only guaranteed to be valid while the condition exists. 
+ */ + if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { LIST(cmd, hostdata->issue_queue); SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = cmd; } else { - for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; + for (tmp = (struct scsi_cmnd *)hostdata->issue_queue; NEXT(tmp); tmp = NEXT(tmp)) ; LIST(cmd, tmp); @@ -987,32 +905,42 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) * If we're not in an interrupt, we can call NCR5380_main() * unconditionally, because it cannot be already running. */ - if (in_interrupt() || ((flags >> 8) & 7) >= 6) - queue_main(); + if (in_interrupt() || irqs_disabled()) + queue_main(hostdata); else - NCR5380_main(NULL); + NCR5380_main(&hostdata->main_task); return 0; } -static DEF_SCSI_QCMD(NCR5380_queue_command) +static inline void maybe_release_dma_irq(struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + /* Caller does the locking needed to set & test these data atomically */ + if (!hostdata->disconnected_queue && + !hostdata->issue_queue && + !hostdata->connected && + !hostdata->retain_dma_intr) + NCR5380_release_dma_irq(instance); +} -/* - * Function : NCR5380_main (void) +/** + * NCR5380_main - NCR state machines * - * Purpose : NCR5380_main is a coroutine that runs as long as more work can - * be done on the NCR5380 host adapters in a system. Both - * NCR5380_queue_command() and NCR5380_intr() will try to start it - * in case it is not running. + * NCR5380_main is a coroutine that runs as long as more work can + * be done on the NCR5380 host adapters in a system. Both + * NCR5380_queue_command() and NCR5380_intr() will try to start it + * in case it is not running. * - * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should - * reenable them. This prevents reentrancy and kernel stack overflow. + * Locks: called as its own thread with no locks held. */ static void NCR5380_main(struct work_struct *work) { - Scsi_Cmnd *tmp, *prev; - struct Scsi_Host *instance = first_instance; - struct NCR5380_hostdata *hostdata = HOSTDATA(instance); + struct NCR5380_hostdata *hostdata = + container_of(work, struct NCR5380_hostdata, main_task); + struct Scsi_Host *instance = hostdata->host; + struct scsi_cmnd *tmp, *prev; int done; unsigned long flags; @@ -1037,9 +965,9 @@ static void NCR5380_main(struct work_struct *work) 'main_running' is set here, and queues/executes main via the task queue, it doesn't do any harm, just this instance of main won't find any work left to do. */ - if (main_running) + if (hostdata->main_running) return; - main_running = 1; + hostdata->main_running = 1; local_save_flags(flags); do { @@ -1053,7 +981,7 @@ static void NCR5380_main(struct work_struct *work) * for a target that's not busy. 
*/ #if (NDEBUG & NDEBUG_LISTS) - for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; + for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) ; /*printk("%p ", tmp);*/ @@ -1061,16 +989,14 @@ static void NCR5380_main(struct work_struct *work) printk(" LOOP\n"); /* else printk("\n"); */ #endif - for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, + for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { u8 lun = tmp->device->lun; -#if (NDEBUG & NDEBUG_LISTS) - if (prev != tmp) - printk("MAIN tmp=%p target=%d busy=%d lun=%llu\n", - tmp, tmp->device->id, hostdata->busy[tmp->device->id], - lun); -#endif + dprintk(NDEBUG_LISTS, + "MAIN tmp=%p target=%d busy=%d lun=%d\n", + tmp, scmd_id(tmp), hostdata->busy[scmd_id(tmp)], + lun); /* When we find one, remove it from the issue queue. */ /* ++guenther: possible race with Falcon locking */ if ( @@ -1090,7 +1016,7 @@ static void NCR5380_main(struct work_struct *work) hostdata->issue_queue = NEXT(tmp); } SET_NEXT(tmp, NULL); - falcon_dont_release++; + hostdata->retain_dma_intr++; /* reenable interrupts after finding one */ local_irq_restore(flags); @@ -1117,12 +1043,12 @@ static void NCR5380_main(struct work_struct *work) #ifdef SUPPORT_TAGS cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE); #endif - if (!NCR5380_select(instance, tmp, - (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : - TAG_NEXT)) { - falcon_dont_release--; + if (!NCR5380_select(instance, tmp)) { + local_irq_disable(); + hostdata->retain_dma_intr--; /* release if target did not response! */ - falcon_release_lock_if_possible(hostdata); + maybe_release_dma_irq(instance); + local_irq_restore(flags); break; } else { local_irq_disable(); @@ -1132,7 +1058,7 @@ static void NCR5380_main(struct work_struct *work) #ifdef SUPPORT_TAGS cmd_free_tag(tmp); #endif - falcon_dont_release--; + hostdata->retain_dma_intr--; local_irq_restore(flags); dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " "returned to issue_queue\n", HOSTNO); @@ -1160,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work) /* Better allow ints _after_ 'main_running' has been cleared, else an interrupt could believe we'll pick up the work it left for us, but we won't see it anymore here... 
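The queues walked above are plain singly linked lists threaded through scsi_cmnd::host_scribble. A stand-alone sketch of that idiom; next_cmd/set_next_cmd/append_cmd are illustrative helpers, not the driver's NEXT()/SET_NEXT()/LIST() macros.

#include <scsi/scsi_cmnd.h>

static inline struct scsi_cmnd *next_cmd(struct scsi_cmnd *cmd)
{
	return (struct scsi_cmnd *)cmd->host_scribble;
}

static inline void set_next_cmd(struct scsi_cmnd *cmd, struct scsi_cmnd *next)
{
	cmd->host_scribble = (unsigned char *)next;
}

/* append at the tail, as the queuecommand path does for ordinary commands */
static void append_cmd(struct scsi_cmnd **queue, struct scsi_cmnd *cmd)
{
	struct scsi_cmnd *tmp;

	set_next_cmd(cmd, NULL);
	if (!*queue) {
		*queue = cmd;
		return;
	}
	for (tmp = *queue; next_cmd(tmp); tmp = next_cmd(tmp))
		;
	set_next_cmd(tmp, cmd);
}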
*/ - main_running = 0; + hostdata->main_running = 0; local_irq_restore(flags); } @@ -1179,9 +1105,11 @@ static void NCR5380_main(struct work_struct *work) static void NCR5380_dma_complete(struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); - int transfered, saved_data = 0, overrun = 0, cnt, toPIO; - unsigned char **data, p; + int transferred; + unsigned char **data; volatile int *count; + int saved_data = 0, overrun = 0; + unsigned char p; if (!hostdata->connected) { printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " @@ -1189,7 +1117,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) return; } - if (atari_read_overruns) { + if (hostdata->read_overruns) { p = hostdata->connected->SCp.phase; if (p & SR_IO) { udelay(10); @@ -1207,21 +1135,41 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); +#if defined(CONFIG_SUN3) + if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { + pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", + instance->host_no); + BUG(); + } + + /* make sure we're not stuck in a data phase */ + if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == + (BASR_PHASE_MATCH | BASR_ACK)) { + pr_err("scsi%d: BASR %02x\n", instance->host_no, + NCR5380_read(BUS_AND_STATUS_REG)); + pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n", + instance->host_no); + BUG(); + } +#endif + (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - transfered = hostdata->dma_len - NCR5380_dma_residual(instance); + transferred = hostdata->dma_len - NCR5380_dma_residual(instance); hostdata->dma_len = 0; data = (unsigned char **)&hostdata->connected->SCp.ptr; count = &hostdata->connected->SCp.this_residual; - *data += transfered; - *count -= transfered; + *data += transferred; + *count -= transferred; + + if (hostdata->read_overruns) { + int cnt, toPIO; - if (atari_read_overruns) { if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { - cnt = toPIO = atari_read_overruns; + cnt = toPIO = hostdata->read_overruns; if (overrun) { dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); *(*data)++ = saved_data; @@ -1238,20 +1186,19 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) #endif /* REAL_DMA */ -/* - * Function : void NCR5380_intr (int irq) - * - * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses - * from the disconnected queue, and restarting NCR5380_main() - * as required. - * - * Inputs : int irq, irq that caused this interrupt. +/** + * NCR5380_intr - generic NCR5380 irq handler + * @irq: interrupt number + * @dev_id: device info * + * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses + * from the disconnected queue, and restarting NCR5380_main() + * as required. 
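The DMA-completion bookkeeping above boils down to subtracting the controller's residual count from the programmed length and advancing the data pointer. A minimal sketch; dma_len and residual stand in for hostdata->dma_len and NCR5380_dma_residual(), and SCp is the midlayer scratch area the driver already uses.

#include <scsi/scsi_cmnd.h>

static void account_dma(struct scsi_cmnd *cmd, int dma_len, int residual)
{
	int transferred = dma_len - residual;	/* bytes the controller actually moved */

	cmd->SCp.ptr += transferred;
	cmd->SCp.this_residual -= transferred;
}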
*/ static irqreturn_t NCR5380_intr(int irq, void *dev_id) { - struct Scsi_Host *instance = first_instance; + struct Scsi_Host *instance = dev_id; int done = 1, handled = 0; unsigned char basr; @@ -1265,7 +1212,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) NCR5380_dprint(NDEBUG_INTR, instance); if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { done = 0; - ENABLE_IRQ(); dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); NCR5380_reselect(instance); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); @@ -1295,17 +1241,19 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); NCR5380_dma_complete( instance ); done = 0; - ENABLE_IRQ(); } else #endif /* REAL_DMA */ { /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ if (basr & BASR_PHASE_MATCH) - printk(KERN_NOTICE "scsi%d: unknown interrupt, " + dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, " "BASR 0x%x, MR 0x%x, SR 0x%x\n", HOSTNO, basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif } } /* if !(SELECTION || PARITY) */ handled = 1; @@ -1314,53 +1262,29 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif } if (!done) { dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); /* Put a call to NCR5380_main() on the queue... */ - queue_main(); + queue_main(shost_priv(instance)); } return IRQ_RETVAL(handled); } -#ifdef NCR5380_STATS -static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd) -{ -# ifdef NCR5380_STAT_LIMIT - if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT) -# endif - switch (cmd->cmnd[0]) { - case WRITE: - case WRITE_6: - case WRITE_10: - hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase); - /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/ - hostdata->pendingw--; - break; - case READ: - case READ_6: - case READ_10: - hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase); - /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/ - hostdata->pendingr--; - break; - } -} -#endif - /* - * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, - * int tag); + * Function : int NCR5380_select(struct Scsi_Host *instance, + * struct scsi_cmnd *cmd) * * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, * including ARBITRATION, SELECTION, and initial message out for * IDENTIFY and queue messages. * * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for - * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for - * the command that is presently connected. + * target lives, cmd - SCSI command to execute. * * Returns : -1 if selection could not execute for some reason, * 0 if selection succeeded or failed because the target @@ -1380,7 +1304,7 @@ static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd) * cmd->result host byte set to DID_BAD_TARGET. 
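Note how the interrupt handler above now recovers the host from dev_id rather than a global: the Scsi_Host pointer is handed to request_irq() and comes back as the handler's cookie. A rough sketch with invented helpers (my_irq_pending, my_handle_events); request_irq(), IRQ_NONE and IRQ_HANDLED are the real interfaces.

#include <linux/interrupt.h>
#include <linux/types.h>
#include <scsi/scsi_host.h>

static bool my_irq_pending(struct Scsi_Host *instance)
{
	return true;				/* stand-in for checking BASR_IRQ */
}

static void my_handle_events(struct Scsi_Host *instance)
{
	/* reselection / DMA-complete / phase handling would go here */
}

static irqreturn_t my_intr(int irq, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;	/* passed as the cookie at request time */

	if (!my_irq_pending(instance))
		return IRQ_NONE;		/* shared line: not ours */

	my_handle_events(instance);
	return IRQ_HANDLED;
}

/* registration:	request_irq(instance->irq, my_intr, 0, "my-scsi", instance); */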
*/ -static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) +static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd) { SETUP_HOSTDATA(instance); unsigned char tmp[3], phase; @@ -1562,7 +1486,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) * selection. */ - timeout = jiffies + 25; + timeout = jiffies + (250 * HZ / 1000); /* * XXX very interesting - we're seeing a bounce where the BSY we @@ -1616,9 +1540,6 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) return -1; } cmd->result = DID_BAD_TARGET << 16; -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif #ifdef SUPPORT_TAGS cmd_free_tag(cmd); #endif @@ -1676,6 +1597,9 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) #ifndef SUPPORT_TAGS hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); #endif +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif initialize_SCp(cmd); @@ -1826,7 +1750,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, * Returns : 0 on success, -1 on failure. */ -static int do_abort(struct Scsi_Host *host) +static int do_abort(struct Scsi_Host *instance) { unsigned char tmp, *msgptr, phase; int len; @@ -1861,7 +1785,7 @@ static int do_abort(struct Scsi_Host *host) msgptr = &tmp; len = 1; phase = PHASE_MSGOUT; - NCR5380_transfer_pio(host, &phase, &len, &msgptr); + NCR5380_transfer_pio(instance, &phase, &len, &msgptr); /* * If we got here, and the command completed successfully, @@ -1899,17 +1823,62 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, SETUP_HOSTDATA(instance); register int c = *count; register unsigned char p = *phase; + unsigned long flags; + +#if defined(CONFIG_SUN3) + /* sanity check */ + if (!sun3_dma_setup_done) { + pr_err("scsi%d: transfer_dma without setup!\n", + instance->host_no); + BUG(); + } + hostdata->dma_len = c; + + dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", + instance->host_no, (p & SR_IO) ? "reading" : "writing", + c, (p & SR_IO) ? "to" : "from", *data); + + /* netbsd turns off ints here, why not be safe and do it too */ + local_irq_save(flags); + + /* send start chain */ + sun3scsi_dma_start(c, *data); + + if (p & SR_IO) { + NCR5380_write(TARGET_COMMAND_REG, 1); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + NCR5380_write(INITIATOR_COMMAND_REG, 0); + NCR5380_write(MODE_REG, + (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); + NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); + } else { + NCR5380_write(TARGET_COMMAND_REG, 0); + NCR5380_read(RESET_PARITY_INTERRUPT_REG); + NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA); + NCR5380_write(MODE_REG, + (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); + NCR5380_write(START_DMA_SEND_REG, 0); + } + +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif + + local_irq_restore(flags); + + sun3_dma_active = 1; + +#else /* !defined(CONFIG_SUN3) */ register unsigned char *d = *data; unsigned char tmp; - unsigned long flags; if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { *phase = tmp; return -1; } - if (atari_read_overruns && (p & SR_IO)) - c -= atari_read_overruns; + if (hostdata->read_overruns && (p & SR_IO)) + c -= hostdata->read_overruns; dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", HOSTNO, (p & SR_IO) ? 
"reading" : "writing", @@ -1921,7 +1890,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); #endif /* def REAL_DMA */ - if (IS_A_TT()) { + if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) { /* On the Medusa, it is a must to initialize the DMA before * starting the NCR. This is also the cleaner way for the TT. */ @@ -1939,7 +1908,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, NCR5380_write(START_DMA_SEND_REG, 0); } - if (!IS_A_TT()) { + if (hostdata->flags & FLAG_LATE_DMA_SETUP) { /* On the Falcon, the DMA setup must be done after the last */ /* NCR access, else the DMA setup gets trashed! */ @@ -1949,6 +1918,8 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, NCR5380_dma_write_setup(instance, d, c); local_irq_restore(flags); } +#endif /* !defined(CONFIG_SUN3) */ + return 0; } #endif /* defined(REAL_DMA) */ @@ -1982,7 +1953,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) #endif unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; - Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; + struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; + +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif while (1) { tmp = NCR5380_read(STATUS_REG); @@ -1993,6 +1968,33 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) old_phase = phase; NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } +#if defined(CONFIG_SUN3) + if (phase == PHASE_CMDOUT) { +#if defined(REAL_DMA) + void *d; + unsigned long count; + + if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { + count = cmd->SCp.buffer->length; + d = sg_virt(cmd->SCp.buffer); + } else { + count = cmd->SCp.this_residual; + d = cmd->SCp.ptr; + } + /* this command setup for dma yet? */ + if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) { + if (cmd->request->cmd_type == REQ_TYPE_FS) { + sun3scsi_dma_setup(d, count, + rq_data_dir(cmd->request)); + sun3_dma_setup_done = cmd; + } + } +#endif +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_INTR; +#endif + } +#endif /* CONFIG_SUN3 */ if (sink && (phase != PHASE_MSGOUT)) { NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); @@ -2054,8 +2056,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ #if defined(REAL_DMA) - if (!cmd->device->borken && - (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) { + if ( +#if !defined(CONFIG_SUN3) + !cmd->device->borken && +#endif + (transfersize = NCR5380_dma_xfer_len(instance, cmd, phase)) >= DMA_MIN_SIZE) { len = transfersize; cmd->SCp.phase = phase; if (NCR5380_transfer_dma(instance, &phase, @@ -2064,9 +2069,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) * If the watchdog timer fires, all future * accesses to this device will use the * polled-IO. 
*/ - printk(KERN_NOTICE "scsi%d: switching target %d " - "lun %llu to slow handshake\n", HOSTNO, - cmd->device->id, cmd->device->lun); + scmd_printk(KERN_INFO, cmd, + "switching to slow handshake\n"); cmd->device->borken = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); @@ -2092,6 +2096,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) NCR5380_transfer_pio(instance, &phase, (int *)&cmd->SCp.this_residual, (unsigned char **)&cmd->SCp.ptr); +#if defined(CONFIG_SUN3) && defined(REAL_DMA) + /* if we had intended to dma that command clear it */ + if (sun3_dma_setup_done == cmd) + sun3_dma_setup_done = NULL; +#endif break; case PHASE_MSGIN: len = 1; @@ -2145,9 +2154,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " "done, calling scsi_done().\n", HOSTNO, cmd->device->id, cmd->device->lun); -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif cmd->scsi_done(cmd); cmd = hostdata->connected; break; @@ -2156,11 +2162,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) case COMMAND_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* ++guenther: possible race with Falcon locking */ - falcon_dont_release++; - hostdata->connected = NULL; dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); + + local_irq_save(flags); + hostdata->retain_dma_intr++; + hostdata->connected = NULL; #ifdef SUPPORT_TAGS cmd_free_tag(cmd); if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { @@ -2172,7 +2179,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ /* ++Andreas: the mid level code knows about QUEUE_FULL now. 
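The request-sense handling a little further below leans on the midlayer's autosense helpers rather than a driver-private AUTOSENSE path. A hedged sketch of those two calls; the argument pattern mirrors the driver's use of hostdata->ses, and everything else here is illustrative.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

static void start_autosense(struct scsi_cmnd *cmd, struct scsi_eh_save *ses)
{
	/* rewrite cmd into a REQUEST SENSE; the original state is saved in *ses */
	scsi_eh_prep_cmnd(cmd, ses, NULL, 0, ~0);
	/* ...the driver then requeues cmd at the head of its issue queue... */
}

static void finish_autosense(struct scsi_cmnd *cmd, struct scsi_eh_save *ses)
{
	/* put the original command back once the sense data has been fetched */
	scsi_eh_restore_cmnd(cmd, ses);
}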
*/ - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; + struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][cmd->device->lun]; dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " "QUEUE_FULL after %d commands\n", HOSTNO, cmd->device->id, cmd->device->lun, @@ -2207,7 +2214,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) else if (status_byte(cmd->SCp.Status) != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); -#ifdef AUTOSENSE if ((cmd->cmnd[0] == REQUEST_SENSE) && hostdata->ses.cmd_len) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); @@ -2220,22 +2226,17 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); - local_irq_save(flags); LIST(cmd,hostdata->issue_queue); SET_NEXT(cmd, hostdata->issue_queue); - hostdata->issue_queue = (Scsi_Cmnd *) cmd; - local_irq_restore(flags); + hostdata->issue_queue = (struct scsi_cmnd *) cmd; dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " "issue queue\n", H_NO(cmd)); - } else -#endif /* def AUTOSENSE */ - { -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif + } else { cmd->scsi_done(cmd); } + local_irq_restore(flags); + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Restore phase bits to 0 so an interrupted selection, @@ -2246,12 +2247,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); - falcon_dont_release--; + local_irq_save(flags); + hostdata->retain_dma_intr--; /* ++roman: For Falcon SCSI, release the lock on the * ST-DMA here if no other commands are waiting on the * disconnected queue. */ - falcon_release_lock_if_possible(hostdata); + maybe_release_dma_irq(instance); + local_irq_restore(flags); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ @@ -2303,6 +2306,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* Wait for bus free to avoid nasty timeouts */ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_DMA_ENABLE; +#endif return; /* * The SCSI data pointer is *IMPLICITLY* saved on a disconnect @@ -2384,20 +2390,18 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ default: if (!tmp) { - printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); + printk(KERN_INFO "scsi%d: rejecting message ", + instance->host_no); spi_print_msg(extended_msg); printk("\n"); } else if (tmp != EXTENDED_MESSAGE) - printk(KERN_DEBUG "scsi%d: rejecting unknown " - "message %02x from target %d, lun %llu\n", - HOSTNO, tmp, cmd->device->id, cmd->device->lun); + scmd_printk(KERN_INFO, cmd, + "rejecting unknown message %02x\n", + tmp); else - printk(KERN_DEBUG "scsi%d: rejecting unknown " - "extended message " - "code %02x, length %d from target %d, lun %llu\n", - HOSTNO, extended_msg[1], extended_msg[0], - cmd->device->id, cmd->device->lun); - + scmd_printk(KERN_INFO, cmd, + "rejecting unknown extended message code %02x, length %d\n", + extended_msg[1], extended_msg[0]); msgout = MESSAGE_REJECT; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); @@ -2410,6 +2414,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) hostdata->last_message = msgout; NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { + local_irq_save(flags); #ifdef SUPPORT_TAGS cmd_free_tag(cmd); #else @@ -2417,12 +2422,10 @@ 
static void NCR5380_information_transfer(struct Scsi_Host *instance) #endif hostdata->connected = NULL; cmd->result = DID_ERROR << 16; -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif - cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - falcon_release_lock_if_possible(hostdata); + maybe_release_dma_irq(instance); + local_irq_restore(flags); + cmd->scsi_done(cmd); return; } msgout = NOP; @@ -2455,7 +2458,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) * Function : void NCR5380_reselect (struct Scsi_Host *instance) * * Purpose : does reselection, initializing the instance->connected - * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q + * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q * nexus has been reestablished, * * Inputs : instance - this instance of the NCR5380. @@ -2463,19 +2466,21 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) */ +/* it might eventually prove necessary to do a dma setup on + reselection, but it doesn't seem to be needed now -- sam */ + static void NCR5380_reselect(struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned char target_mask; - unsigned char lun, phase; - int len; + unsigned char lun; #ifdef SUPPORT_TAGS unsigned char tag; #endif unsigned char msg[3]; - unsigned char *data; - Scsi_Cmnd *tmp = NULL, *prev; -/* unsigned long flags; */ + int __maybe_unused len; + unsigned char __maybe_unused *data, __maybe_unused phase; + struct scsi_cmnd *tmp = NULL, *prev; /* * Disable arbitration, etc. since the host adapter obviously @@ -2511,10 +2516,18 @@ static void NCR5380_reselect(struct Scsi_Host *instance) while (!(NCR5380_read(STATUS_REG) & SR_REQ)) ; +#if defined(CONFIG_SUN3) && defined(REAL_DMA) + /* acknowledge toggle to MSGIN */ + NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); + + /* peek at the byte without really hitting the bus */ + msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); +#else len = 1; data = msg; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); +#endif if (!(msg[0] & 0x80)) { printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); @@ -2524,13 +2537,13 @@ static void NCR5380_reselect(struct Scsi_Host *instance) } lun = (msg[0] & 0x07); -#ifdef SUPPORT_TAGS +#if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3) /* If the phase is still MSGIN, the target wants to send some more * messages. In case it supports tagged queuing, this is probably a * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. */ tag = TAG_NONE; - if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { + if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) { /* Accept previous IDENTIFY message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = 2; @@ -2548,15 +2561,13 @@ static void NCR5380_reselect(struct Scsi_Host *instance) * just reestablished, and remove it from the disconnected queue. 
*/ - for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; + for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) #ifdef SUPPORT_TAGS && (tag == tmp->tag) #endif ) { - /* ++guenther: prevent race with falcon_release_lock */ - falcon_dont_release++; if (prev) { REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); SET_NEXT(prev, NEXT(tmp)); @@ -2588,26 +2599,67 @@ static void NCR5380_reselect(struct Scsi_Host *instance) return; } +#if defined(CONFIG_SUN3) && defined(REAL_DMA) + /* engage dma setup for the command we just saw */ + { + void *d; + unsigned long count; + + if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { + count = tmp->SCp.buffer->length; + d = sg_virt(tmp->SCp.buffer); + } else { + count = tmp->SCp.this_residual; + d = tmp->SCp.ptr; + } + /* setup this command for dma if not already */ + if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) { + sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request)); + sun3_dma_setup_done = tmp; + } + } + + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); +#endif + /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); +#if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3) + /* If the phase is still MSGIN, the target wants to send some more + * messages. In case it supports tagged queuing, this is probably a + * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. + */ + tag = TAG_NONE; + if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { + /* Accept previous IDENTIFY message by clearing ACK */ + NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); + len = 2; + data = msg + 1; + if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && + msg[1] == SIMPLE_QUEUE_TAG) + tag = msg[2]; + dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at reselection\n" + HOSTNO, target_mask, lun, tag); + } +#endif + hostdata->connected = tmp; dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); - falcon_dont_release--; } /* - * Function : int NCR5380_abort (Scsi_Cmnd *cmd) + * Function : int NCR5380_abort (struct scsi_cmnd *cmd) * * Purpose : abort a command * - * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the + * Inputs : cmd - the scsi_cmnd to abort, code - code to set the * host byte of the result field to, if zero DID_ABORTED is * used. * - * Returns : 0 - success, -1 on failure. + * Returns : SUCCESS - success, FAILED on failure. * * XXX - there is no way to abort the command that is currently * connected, you have to wait for it to complete. If this is @@ -2616,24 +2668,19 @@ static void NCR5380_reselect(struct Scsi_Host *instance) */ static -int NCR5380_abort(Scsi_Cmnd *cmd) +int NCR5380_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; SETUP_HOSTDATA(instance); - Scsi_Cmnd *tmp, **prev; + struct scsi_cmnd *tmp, **prev; unsigned long flags; - printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); - scsi_print_command(cmd); + scmd_printk(KERN_NOTICE, cmd, "aborting command\n"); NCR5380_print_status(instance); local_irq_save(flags); - if (!IS_A_TT() && !falcon_got_lock) - printk(KERN_ERR "scsi%d: !!BINGO!! 
Falcon has no lock in NCR5380_abort\n", - HOSTNO); - dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -2674,12 +2721,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd) #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif + maybe_release_dma_irq(instance); local_irq_restore(flags); cmd->scsi_done(cmd); - falcon_release_lock_if_possible(hostdata); return SUCCESS; } else { -/* local_irq_restore(flags); */ + local_irq_restore(flags); printk("scsi%d: abort of connected command failed!\n", HOSTNO); return FAILED; } @@ -2690,21 +2737,21 @@ int NCR5380_abort(Scsi_Cmnd *cmd) * Case 2 : If the command hasn't been issued yet, we simply remove it * from the issue queue. */ - for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue), - tmp = (Scsi_Cmnd *)hostdata->issue_queue; + for (prev = (struct scsi_cmnd **)&(hostdata->issue_queue), + tmp = (struct scsi_cmnd *)hostdata->issue_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); (*prev) = NEXT(tmp); SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; + maybe_release_dma_irq(instance); local_irq_restore(flags); dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", HOSTNO); /* Tagged queuing note: no tag to free here, hasn't been assigned * yet... */ tmp->scsi_done(tmp); - falcon_release_lock_if_possible(hostdata); return SUCCESS; } } @@ -2751,13 +2798,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd) * it from the disconnected queue. */ - for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; + for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = NEXT(tmp)) { if (cmd == tmp) { local_irq_restore(flags); dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); - if (NCR5380_select(instance, cmd, (int)cmd->tag)) + if (NCR5380_select(instance, cmd)) return FAILED; dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); @@ -2765,8 +2812,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd) do_abort(instance); local_irq_save(flags); - for (prev = (Scsi_Cmnd **)&(hostdata->disconnected_queue), - tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; + for (prev = (struct scsi_cmnd **)&(hostdata->disconnected_queue), + tmp = (struct scsi_cmnd *)hostdata->disconnected_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); @@ -2782,15 +2829,22 @@ int NCR5380_abort(Scsi_Cmnd *cmd) #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif + maybe_release_dma_irq(instance); local_irq_restore(flags); tmp->scsi_done(tmp); - falcon_release_lock_if_possible(hostdata); return SUCCESS; } } } } + /* Maybe it is sufficient just to release the ST-DMA lock... (if + * possible at all) At least, we should check if the lock could be + * released after the abort, in case it is kept due to some bug. + */ + maybe_release_dma_irq(instance); + local_irq_restore(flags); + /* * Case 5 : If we reached this point, the command was not found in any of * the queues. @@ -2801,21 +2855,14 @@ int NCR5380_abort(Scsi_Cmnd *cmd) * broke. */ - local_irq_restore(flags); printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); - /* Maybe it is sufficient just to release the ST-DMA lock... (if - * possible at all) At least, we should check if the lock could be - * released after the abort, in case it is kept due to some bug. 
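The abort handler above ultimately reports SUCCESS or FAILED to the midlayer after trying to pull the command off one of the driver's queues. A much-simplified sketch of that shape; my_hostdata and my_dequeue are invented, while SUCCESS, FAILED and DID_ABORT are the real SCSI constants.

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_hostdata {				/* invented */
	struct scsi_cmnd *issue_queue;		/* linked via host_scribble */
};

static bool my_dequeue(struct my_hostdata *hostdata, struct scsi_cmnd *cmd)
{
	struct scsi_cmnd **prev;

	/* walk the host_scribble chain, unlinking cmd if it is still queued */
	for (prev = &hostdata->issue_queue; *prev;
	     prev = (struct scsi_cmnd **)&(*prev)->host_scribble) {
		if (*prev == cmd) {
			*prev = (struct scsi_cmnd *)cmd->host_scribble;
			return true;
		}
	}
	return false;
}

static int my_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct my_hostdata *hostdata = shost_priv(instance);

	if (!my_dequeue(hostdata, cmd))
		return FAILED;			/* already active or completed */

	cmd->result = DID_ABORT << 16;
	cmd->scsi_done(cmd);
	return SUCCESS;
}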
- */ - falcon_release_lock_if_possible(hostdata); - return FAILED; } /* - * Function : int NCR5380_reset (Scsi_Cmnd *cmd) + * Function : int NCR5380_reset (struct scsi_cmnd *cmd) * * Purpose : reset the SCSI bus. * @@ -2823,20 +2870,14 @@ int NCR5380_abort(Scsi_Cmnd *cmd) * */ -static int NCR5380_bus_reset(Scsi_Cmnd *cmd) +static int NCR5380_bus_reset(struct scsi_cmnd *cmd) { - SETUP_HOSTDATA(cmd->device->host); + struct Scsi_Host *instance = cmd->device->host; + struct NCR5380_hostdata *hostdata = shost_priv(instance); int i; unsigned long flags; -#if defined(RESET_RUN_DONE) - Scsi_Cmnd *connected, *disconnected_queue; -#endif - - if (!IS_A_TT() && !falcon_got_lock) - printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n", - H_NO(cmd)); - NCR5380_print_status(cmd->device->host); + NCR5380_print_status(instance); /* get in phase */ NCR5380_write(TARGET_COMMAND_REG, @@ -2853,89 +2894,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) * through anymore ... */ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); - /* MSch 20140115 - looking at the generic NCR5380 driver, all of this - * should go. - * Catch-22: if we don't clear all queues, the SCSI driver lock will - * not be reset by atari_scsi_reset()! - */ - -#if defined(RESET_RUN_DONE) - /* XXX Should now be done by midlevel code, but it's broken XXX */ - /* XXX see below XXX */ - - /* MSch: old-style reset: actually abort all command processing here */ - - /* After the reset, there are no more connected or disconnected commands - * and no busy units; to avoid problems with re-inserting the commands - * into the issue_queue (via scsi_done()), the aborted commands are - * remembered in local variables first. - */ - local_irq_save(flags); - connected = (Scsi_Cmnd *)hostdata->connected; - hostdata->connected = NULL; - disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue; - hostdata->disconnected_queue = NULL; -#ifdef SUPPORT_TAGS - free_all_tags(); -#endif - for (i = 0; i < 8; ++i) - hostdata->busy[i] = 0; -#ifdef REAL_DMA - hostdata->dma_len = 0; -#endif - local_irq_restore(flags); - - /* In order to tell the mid-level code which commands were aborted, - * set the command status to DID_RESET and call scsi_done() !!! - * This ultimately aborts processing of these commands in the mid-level. - */ - - if ((cmd = connected)) { - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); - cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); - cmd->scsi_done(cmd); - } - - for (i = 0; (cmd = disconnected_queue); ++i) { - disconnected_queue = NEXT(cmd); - SET_NEXT(cmd, NULL); - cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); - cmd->scsi_done(cmd); - } - if (i > 0) - dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); - - /* The Falcon lock should be released after a reset... - */ - /* ++guenther: moved to atari_scsi_reset(), to prevent a race between - * unlocking and enabling dma interrupt. - */ -/* falcon_release_lock_if_possible( hostdata );*/ - - /* since all commands have been explicitly terminated, we need to tell - * the midlevel code that the reset was SUCCESSFUL, and there is no - * need to 'wake up' the commands by a request_sense - */ - return SUCCESS; -#else /* 1 */ - - /* MSch: new-style reset handling: let the mid-level do what it can */ - - /* ++guenther: MID-LEVEL IS STILL BROKEN. - * Mid-level is supposed to requeue all commands that were active on the - * various low-level queues. 
In fact it does this, but that's not enough - * because all these commands are subject to timeout. And if a timeout - * happens for any removed command, *_abort() is called but all queues - * are now empty. Abort then gives up the falcon lock, which is fatal, - * since the mid-level will queue more commands and must have the lock - * (it's all happening inside timer interrupt handler!!). - * Even worse, abort will return NOT_RUNNING for all those commands not - * on any queue, so they won't be retried ... - * - * Conclusion: either scsi.c disables timeout for all resetted commands - * immediately, or we lose! As of linux-2.0.20 it doesn't. - */ - /* After the reset, there are no more connected or disconnected commands * and no busy units; so clear the low-level status here to avoid * conflicts when the mid-level code tries to wake up the affected @@ -2954,16 +2912,16 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) hostdata->connected = NULL; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS - free_all_tags(); + free_all_tags(hostdata); #endif for (i = 0; i < 8; ++i) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif + + maybe_release_dma_irq(instance); local_irq_restore(flags); - /* we did no complete reset of all commands, so a wakeup is required */ return SUCCESS; -#endif /* 1 */ } diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index b522134528d6..d1c37a386947 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -64,45 +64,57 @@ /**************************************************************************/ - #include <linux/module.h> - -#define AUTOSENSE -/* For the Atari version, use only polled IO or REAL_DMA */ -#define REAL_DMA -/* Support tagged queuing? (on devices that are able to... :-) */ -#define SUPPORT_TAGS -#define MAX_TAGS 32 - #include <linux/types.h> -#include <linux/stddef.h> -#include <linux/ctype.h> #include <linux/delay.h> -#include <linux/mm.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/nvram.h> #include <linux/bitops.h> #include <linux/wait.h> +#include <linux/platform_device.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/irq.h> -#include <asm/traps.h> - -#include "scsi.h" -#include <scsi/scsi_host.h> -#include "atari_scsi.h" -#include "NCR5380.h" #include <asm/atari_stdma.h> #include <asm/atari_stram.h> #include <asm/io.h> -#include <linux/stat.h> +#include <scsi/scsi_host.h> + +/* Definitions for the core NCR5380 driver. 
*/ + +#define REAL_DMA +#define SUPPORT_TAGS +#define MAX_TAGS 32 +#define DMA_MIN_SIZE 32 + +#define NCR5380_implementation_fields /* none */ + +#define NCR5380_read(reg) atari_scsi_reg_read(reg) +#define NCR5380_write(reg, value) atari_scsi_reg_write(reg, value) + +#define NCR5380_queue_command atari_scsi_queue_command +#define NCR5380_abort atari_scsi_abort +#define NCR5380_show_info atari_scsi_show_info +#define NCR5380_info atari_scsi_info + +#define NCR5380_dma_read_setup(instance, data, count) \ + atari_scsi_dma_setup(instance, data, count, 0) +#define NCR5380_dma_write_setup(instance, data, count) \ + atari_scsi_dma_setup(instance, data, count, 1) +#define NCR5380_dma_residual(instance) \ + atari_scsi_dma_residual(instance) +#define NCR5380_dma_xfer_len(instance, cmd, phase) \ + atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO)) + +#define NCR5380_acquire_dma_irq(instance) falcon_get_lock(instance) +#define NCR5380_release_dma_irq(instance) falcon_release_lock() + +#include "NCR5380.h" + #define IS_A_TT() ATARIHW_PRESENT(TT_SCSI) @@ -149,23 +161,6 @@ static inline unsigned long SCSI_DMA_GETADR(void) return adr; } -static inline void ENABLE_IRQ(void) -{ - if (IS_A_TT()) - atari_enable_irq(IRQ_TT_MFP_SCSI); - else - atari_enable_irq(IRQ_MFP_FSCSI); -} - -static inline void DISABLE_IRQ(void) -{ - if (IS_A_TT()) - atari_disable_irq(IRQ_TT_MFP_SCSI); - else - atari_disable_irq(IRQ_MFP_FSCSI); -} - - #define HOSTDATA_DMALEN (((struct NCR5380_hostdata *) \ (atari_scsi_host->hostdata))->dma_len) @@ -178,30 +173,9 @@ static inline void DISABLE_IRQ(void) #define AFTER_RESET_DELAY (5*HZ/2) #endif -/***************************** Prototypes *****************************/ - #ifdef REAL_DMA -static int scsi_dma_is_ignored_buserr(unsigned char dma_stat); static void atari_scsi_fetch_restbytes(void); -static long atari_scsi_dma_residual(struct Scsi_Host *instance); -static int falcon_classify_cmd(Scsi_Cmnd *cmd); -static unsigned long atari_dma_xfer_len(unsigned long wanted_len, - Scsi_Cmnd *cmd, int write_flag); -#endif -static irqreturn_t scsi_tt_intr(int irq, void *dummy); -static irqreturn_t scsi_falcon_intr(int irq, void *dummy); -static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata); -static void falcon_get_lock(void); -#ifdef CONFIG_ATARI_SCSI_RESET_BOOT -static void atari_scsi_reset_boot(void); #endif -static unsigned char atari_scsi_tt_reg_read(unsigned char reg); -static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value); -static unsigned char atari_scsi_falcon_reg_read(unsigned char reg); -static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value); - -/************************* End of Prototypes **************************/ - static struct Scsi_Host *atari_scsi_host; static unsigned char (*atari_scsi_reg_read)(unsigned char reg); @@ -226,8 +200,6 @@ static char *atari_dma_orig_addr; /* mask for address bits that can't be used with the ST-DMA */ static unsigned long atari_dma_stram_mask; #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) -/* number of bytes to cut from a transfer to handle NCR overruns */ -static int atari_read_overruns; #endif static int setup_can_queue = -1; @@ -386,10 +358,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) NCR5380_intr(irq, dummy); -#if 0 - /* To be sure the int is not masked */ - atari_enable_irq(IRQ_TT_MFP_SCSI); -#endif return IRQ_HANDLED; } @@ -480,257 +448,35 @@ static void atari_scsi_fetch_restbytes(void) #endif /* REAL_DMA */ -static int 
falcon_got_lock = 0; -static DECLARE_WAIT_QUEUE_HEAD(falcon_fairness_wait); -static int falcon_trying_lock = 0; -static DECLARE_WAIT_QUEUE_HEAD(falcon_try_wait); -static int falcon_dont_release = 0; - /* This function releases the lock on the DMA chip if there is no - * connected command and the disconnected queue is empty. On - * releasing, instances of falcon_get_lock are awoken, that put - * themselves to sleep for fairness. They can now try to get the lock - * again (but others waiting longer more probably will win). + * connected command and the disconnected queue is empty. */ -static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata) +static void falcon_release_lock(void) { - unsigned long flags; - if (IS_A_TT()) return; - local_irq_save(flags); - - if (falcon_got_lock && !hostdata->disconnected_queue && - !hostdata->issue_queue && !hostdata->connected) { - - if (falcon_dont_release) { -#if 0 - printk("WARNING: Lock release not allowed. Ignored\n"); -#endif - local_irq_restore(flags); - return; - } - falcon_got_lock = 0; + if (stdma_is_locked_by(scsi_falcon_intr)) stdma_release(); - wake_up(&falcon_fairness_wait); - } - - local_irq_restore(flags); } /* This function manages the locking of the ST-DMA. * If the DMA isn't locked already for SCSI, it tries to lock it by * calling stdma_lock(). But if the DMA is locked by the SCSI code and * there are other drivers waiting for the chip, we do not issue the - * command immediately but wait on 'falcon_fairness_queue'. We will be - * waked up when the DMA is unlocked by some SCSI interrupt. After that - * we try to get the lock again. - * But we must be prepared that more than one instance of - * falcon_get_lock() is waiting on the fairness queue. They should not - * try all at once to call stdma_lock(), one is enough! For that, the - * first one sets 'falcon_trying_lock', others that see that variable - * set wait on the queue 'falcon_try_wait'. - * Complicated, complicated.... Sigh... + * command immediately but tell the SCSI mid-layer to defer. */ -static void falcon_get_lock(void) +static int falcon_get_lock(struct Scsi_Host *instance) { - unsigned long flags; - if (IS_A_TT()) - return; - - local_irq_save(flags); - - wait_event_cmd(falcon_fairness_wait, - in_interrupt() || !falcon_got_lock || !stdma_others_waiting(), - local_irq_restore(flags), - local_irq_save(flags)); - - while (!falcon_got_lock) { - if (in_irq()) - panic("Falcon SCSI hasn't ST-DMA lock in interrupt"); - if (!falcon_trying_lock) { - falcon_trying_lock = 1; - stdma_lock(scsi_falcon_intr, NULL); - falcon_got_lock = 1; - falcon_trying_lock = 0; - wake_up(&falcon_try_wait); - } else { - wait_event_cmd(falcon_try_wait, - falcon_got_lock && !falcon_trying_lock, - local_irq_restore(flags), - local_irq_save(flags)); - } - } - - local_irq_restore(flags); - if (!falcon_got_lock) - panic("Falcon SCSI: someone stole the lock :-(\n"); -} - - -static int __init atari_scsi_detect(struct scsi_host_template *host) -{ - static int called = 0; - struct Scsi_Host *instance; - - if (!MACH_IS_ATARI || - (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) || - called) - return 0; - - host->proc_name = "Atari"; - - atari_scsi_reg_read = IS_A_TT() ? atari_scsi_tt_reg_read : - atari_scsi_falcon_reg_read; - atari_scsi_reg_write = IS_A_TT() ? atari_scsi_tt_reg_write : - atari_scsi_falcon_reg_write; - - /* setup variables */ - host->can_queue = - (setup_can_queue > 0) ? setup_can_queue : - IS_A_TT() ? 
ATARI_TT_CAN_QUEUE : ATARI_FALCON_CAN_QUEUE; - host->cmd_per_lun = - (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : - IS_A_TT() ? ATARI_TT_CMD_PER_LUN : ATARI_FALCON_CMD_PER_LUN; - /* Force sg_tablesize to 0 on a Falcon! */ - host->sg_tablesize = - !IS_A_TT() ? ATARI_FALCON_SG_TABLESIZE : - (setup_sg_tablesize >= 0) ? setup_sg_tablesize : ATARI_TT_SG_TABLESIZE; - - if (setup_hostid >= 0) - host->this_id = setup_hostid; - else { - /* use 7 as default */ - host->this_id = 7; - /* Test if a host id is set in the NVRam */ - if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) { - unsigned char b = nvram_read_byte( 14 ); - /* Arbitration enabled? (for TOS) If yes, use configured host ID */ - if (b & 0x80) - host->this_id = b & 7; - } - } + return 1; -#ifdef SUPPORT_TAGS - if (setup_use_tagged_queuing < 0) - setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING; -#endif -#ifdef REAL_DMA - /* If running on a Falcon and if there's TT-Ram (i.e., more than one - * memory block, since there's always ST-Ram in a Falcon), then allocate a - * STRAM_BUFFER_SIZE byte dribble buffer for transfers from/to alternative - * Ram. - */ - if (MACH_IS_ATARI && ATARIHW_PRESENT(ST_SCSI) && - !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) { - atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); - if (!atari_dma_buffer) { - printk(KERN_ERR "atari_scsi_detect: can't allocate ST-RAM " - "double buffer\n"); - return 0; - } - atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); - atari_dma_orig_addr = 0; - } -#endif - instance = scsi_register(host, sizeof(struct NCR5380_hostdata)); - if (instance == NULL) { - atari_stram_free(atari_dma_buffer); - atari_dma_buffer = 0; - return 0; - } - atari_scsi_host = instance; - /* - * Set irq to 0, to avoid that the mid-level code disables our interrupt - * during queue_command calls. This is completely unnecessary, and even - * worse causes bad problems on the Falcon, where the int is shared with - * IDE and floppy! - */ - instance->irq = 0; - -#ifdef CONFIG_ATARI_SCSI_RESET_BOOT - atari_scsi_reset_boot(); -#endif - NCR5380_init(instance, 0); - - if (IS_A_TT()) { - - /* This int is actually "pseudo-slow", i.e. it acts like a slow - * interrupt after having cleared the pending flag for the DMA - * interrupt. */ - if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW, - "SCSI NCR5380", instance)) { - printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI); - scsi_unregister(atari_scsi_host); - atari_stram_free(atari_dma_buffer); - atari_dma_buffer = 0; - return 0; - } - tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ -#ifdef REAL_DMA - tt_scsi_dma.dma_ctrl = 0; - atari_dma_residual = 0; - - if (MACH_IS_MEDUSA) { - /* While the read overruns (described by Drew Eckhardt in - * NCR5380.c) never happened on TTs, they do in fact on the Medusa - * (This was the cause why SCSI didn't work right for so long - * there.) Since handling the overruns slows down a bit, I turned - * the #ifdef's into a runtime condition. - * - * In principle it should be sufficient to do max. 1 byte with - * PIO, but there is another problem on the Medusa with the DMA - * rest data register. So 'atari_read_overruns' is currently set - * to 4 to avoid having transfers that aren't a multiple of 4. If - * the rest data bug is fixed, this can be lowered to 1. - */ - atari_read_overruns = 4; - } -#endif /*REAL_DMA*/ - } else { /* ! 
IS_A_TT */ - - /* Nothing to do for the interrupt: the ST-DMA is initialized - * already by atari_init_INTS() - */ - -#ifdef REAL_DMA - atari_dma_residual = 0; - atari_dma_active = 0; - atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 - : 0xff000000); -#endif - } + if (in_interrupt()) + return stdma_try_lock(scsi_falcon_intr, instance); - printk(KERN_INFO "scsi%d: options CAN_QUEUE=%d CMD_PER_LUN=%d SCAT-GAT=%d " -#ifdef SUPPORT_TAGS - "TAGGED-QUEUING=%s " -#endif - "HOSTID=%d", - instance->host_no, instance->hostt->can_queue, - instance->hostt->cmd_per_lun, - instance->hostt->sg_tablesize, -#ifdef SUPPORT_TAGS - setup_use_tagged_queuing ? "yes" : "no", -#endif - instance->hostt->this_id ); - NCR5380_print_options(instance); - printk("\n"); - - called = 1; - return 1; -} - -static int atari_scsi_release(struct Scsi_Host *sh) -{ - if (IS_A_TT()) - free_irq(IRQ_TT_MFP_SCSI, sh); - if (atari_dma_buffer) - atari_stram_free(atari_dma_buffer); - NCR5380_exit(sh); + stdma_lock(scsi_falcon_intr, instance); return 1; } @@ -739,7 +485,7 @@ static int __init atari_scsi_setup(char *str) { /* Format of atascsi parameter is: * atascsi=<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags> - * Defaults depend on TT or Falcon, hostid determined at run time. + * Defaults depend on TT or Falcon, determined at run time. * Negative values mean don't change. */ int ints[6]; @@ -750,36 +496,17 @@ static int __init atari_scsi_setup(char *str) printk("atari_scsi_setup: no arguments!\n"); return 0; } - - if (ints[0] >= 1) { - if (ints[1] > 0) - /* no limits on this, just > 0 */ - setup_can_queue = ints[1]; - } - if (ints[0] >= 2) { - if (ints[2] > 0) - setup_cmd_per_lun = ints[2]; - } - if (ints[0] >= 3) { - if (ints[3] >= 0) { - setup_sg_tablesize = ints[3]; - /* Must be <= SG_ALL (255) */ - if (setup_sg_tablesize > SG_ALL) - setup_sg_tablesize = SG_ALL; - } - } - if (ints[0] >= 4) { - /* Must be between 0 and 7 */ - if (ints[4] >= 0 && ints[4] <= 7) - setup_hostid = ints[4]; - else if (ints[4] > 7) - printk("atari_scsi_setup: invalid host ID %d !\n", ints[4]); - } + if (ints[0] >= 1) + setup_can_queue = ints[1]; + if (ints[0] >= 2) + setup_cmd_per_lun = ints[2]; + if (ints[0] >= 3) + setup_sg_tablesize = ints[3]; + if (ints[0] >= 4) + setup_hostid = ints[4]; #ifdef SUPPORT_TAGS - if (ints[0] >= 5) { - if (ints[5] >= 0) - setup_use_tagged_queuing = !!ints[5]; - } + if (ints[0] >= 5) + setup_use_tagged_queuing = ints[5]; #endif return 1; @@ -788,45 +515,6 @@ static int __init atari_scsi_setup(char *str) __setup("atascsi=", atari_scsi_setup); #endif /* !MODULE */ -static int atari_scsi_bus_reset(Scsi_Cmnd *cmd) -{ - int rv; - struct NCR5380_hostdata *hostdata = - (struct NCR5380_hostdata *)cmd->device->host->hostdata; - - /* For doing the reset, SCSI interrupts must be disabled first, - * since the 5380 raises its IRQ line while _RST is active and we - * can't disable interrupts completely, since we need the timer. 
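The simplified atascsi= handler above uses the standard boot-parameter plumbing: get_options() parses the comma-separated integers and __setup() registers the handler. A small sketch with an invented parameter name and variable; get_options(), ARRAY_SIZE() and __setup() are the real kernel facilities.

#include <linux/init.h>
#include <linux/kernel.h>

static int my_can_queue = -1;			/* invented module setting */

static int __init my_scsi_setup(char *str)
{
	int ints[6];	/* ints[0] receives the number of values parsed */

	get_options(str, ARRAY_SIZE(ints), ints);
	if (ints[0] >= 1)
		my_can_queue = ints[1];
	return 1;
}

__setup("myscsi=", my_scsi_setup);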
- */ - /* And abort a maybe active DMA transfer */ - if (IS_A_TT()) { - atari_turnoff_irq(IRQ_TT_MFP_SCSI); -#ifdef REAL_DMA - tt_scsi_dma.dma_ctrl = 0; -#endif /* REAL_DMA */ - } else { - atari_turnoff_irq(IRQ_MFP_FSCSI); -#ifdef REAL_DMA - st_dma.dma_mode_status = 0x90; - atari_dma_active = 0; - atari_dma_orig_addr = NULL; -#endif /* REAL_DMA */ - } - - rv = NCR5380_bus_reset(cmd); - - /* Re-enable ints */ - if (IS_A_TT()) { - atari_turnon_irq(IRQ_TT_MFP_SCSI); - } else { - atari_turnon_irq(IRQ_MFP_FSCSI); - } - if (rv == SUCCESS) - falcon_release_lock_if_possible(hostdata); - - return rv; -} - #ifdef CONFIG_ATARI_SCSI_RESET_BOOT static void __init atari_scsi_reset_boot(void) @@ -860,15 +548,6 @@ static void __init atari_scsi_reset_boot(void) } #endif - -static const char *atari_scsi_info(struct Scsi_Host *host) -{ - /* atari_scsi_detect() is verbose enough... */ - static const char string[] = "Atari native SCSI"; - return string; -} - - #if defined(REAL_DMA) static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, @@ -949,7 +628,7 @@ static long atari_scsi_dma_residual(struct Scsi_Host *instance) #define CMD_SURELY_BYTE_MODE 1 #define CMD_MODE_UNKNOWN 2 -static int falcon_classify_cmd(Scsi_Cmnd *cmd) +static int falcon_classify_cmd(struct scsi_cmnd *cmd) { unsigned char opcode = cmd->cmnd[0]; @@ -981,7 +660,7 @@ static int falcon_classify_cmd(Scsi_Cmnd *cmd) */ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, - Scsi_Cmnd *cmd, int write_flag) + struct scsi_cmnd *cmd, int write_flag) { unsigned long possible_len, limit; @@ -1099,23 +778,247 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value) #include "atari_NCR5380.c" -static struct scsi_host_template driver_template = { +static int atari_scsi_bus_reset(struct scsi_cmnd *cmd) +{ + int rv; + unsigned long flags; + + local_irq_save(flags); + +#ifdef REAL_DMA + /* Abort a maybe active DMA transfer */ + if (IS_A_TT()) { + tt_scsi_dma.dma_ctrl = 0; + } else { + st_dma.dma_mode_status = 0x90; + atari_dma_active = 0; + atari_dma_orig_addr = NULL; + } +#endif + + rv = NCR5380_bus_reset(cmd); + + /* The 5380 raises its IRQ line while _RST is active but the ST DMA + * "lock" has been released so this interrupt may end up handled by + * floppy or IDE driver (if one of them holds the lock). The NCR5380 + * interrupt flag has been cleared already. 
+ */ + + local_irq_restore(flags); + + return rv; +} + +#define DRV_MODULE_NAME "atari_scsi" +#define PFX DRV_MODULE_NAME ": " + +static struct scsi_host_template atari_scsi_template = { + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, .show_info = atari_scsi_show_info, .name = "Atari native SCSI", - .detect = atari_scsi_detect, - .release = atari_scsi_release, .info = atari_scsi_info, .queuecommand = atari_scsi_queue_command, .eh_abort_handler = atari_scsi_abort, .eh_bus_reset_handler = atari_scsi_bus_reset, - .can_queue = 0, /* initialized at run-time */ - .this_id = 0, /* initialized at run-time */ - .sg_tablesize = 0, /* initialized at run-time */ - .cmd_per_lun = 0, /* initialized at run-time */ + .this_id = 7, .use_clustering = DISABLE_CLUSTERING }; +static int __init atari_scsi_probe(struct platform_device *pdev) +{ + struct Scsi_Host *instance; + int error; + struct resource *irq; + int host_flags = 0; + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) + return -ENODEV; + + if (ATARIHW_PRESENT(TT_SCSI)) { + atari_scsi_reg_read = atari_scsi_tt_reg_read; + atari_scsi_reg_write = atari_scsi_tt_reg_write; + } else { + atari_scsi_reg_read = atari_scsi_falcon_reg_read; + atari_scsi_reg_write = atari_scsi_falcon_reg_write; + } + + /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. + * Higher values should work, too; try it! + * (But cmd_per_lun costs memory!) + * + * But there seems to be a bug somewhere that requires CAN_QUEUE to be + * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since + * changed CMD_PER_LUN... + * + * Note: The Falcon currently uses 8/1 setting due to unsolved problems + * with cmd_per_lun != 1 + */ + if (ATARIHW_PRESENT(TT_SCSI)) { + atari_scsi_template.can_queue = 16; + atari_scsi_template.cmd_per_lun = 8; + atari_scsi_template.sg_tablesize = SG_ALL; + } else { + atari_scsi_template.can_queue = 8; + atari_scsi_template.cmd_per_lun = 1; + atari_scsi_template.sg_tablesize = SG_NONE; + } + + if (setup_can_queue > 0) + atari_scsi_template.can_queue = setup_can_queue; + + if (setup_cmd_per_lun > 0) + atari_scsi_template.cmd_per_lun = setup_cmd_per_lun; + + /* Leave sg_tablesize at 0 on a Falcon! */ + if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0) + atari_scsi_template.sg_tablesize = setup_sg_tablesize; + + if (setup_hostid >= 0) { + atari_scsi_template.this_id = setup_hostid & 7; + } else { + /* Test if a host id is set in the NVRam */ + if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) { + unsigned char b = nvram_read_byte(14); + + /* Arbitration enabled? (for TOS) + * If yes, use configured host ID + */ + if (b & 0x80) + atari_scsi_template.this_id = b & 7; + } + } + + +#ifdef REAL_DMA + /* If running on a Falcon and if there's TT-Ram (i.e., more than one + * memory block, since there's always ST-Ram in a Falcon), then + * allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers + * from/to alternative Ram. 
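The surrounding atari_scsi_probe()/remove() conversion follows the usual platform-driver skeleton: probe() allocates and registers the Scsi_Host, remove() tears it down, and module_platform_driver_probe() generates the module init/exit glue. A condensed, self-contained sketch with invented names (my_scsi_*); the real probe additionally wires up the IRQ, the ST-RAM bounce buffer and the NCR5380 core.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <scsi/scsi_host.h>

struct my_hostdata {				/* invented private data */
	int dummy;
};

static struct scsi_host_template my_scsi_template = {
	.module		= THIS_MODULE,
	.name		= "my-scsi",		/* real handlers omitted for brevity */
	.this_id	= 7,
};

static int __init my_scsi_probe(struct platform_device *pdev)
{
	struct Scsi_Host *instance;
	int error;

	instance = scsi_host_alloc(&my_scsi_template, sizeof(struct my_hostdata));
	if (!instance)
		return -ENOMEM;

	error = scsi_add_host(instance, &pdev->dev);
	if (error) {
		scsi_host_put(instance);
		return error;
	}
	platform_set_drvdata(pdev, instance);
	scsi_scan_host(instance);
	return 0;
}

static int __exit my_scsi_remove(struct platform_device *pdev)
{
	struct Scsi_Host *instance = platform_get_drvdata(pdev);

	scsi_remove_host(instance);
	scsi_host_put(instance);
	return 0;
}

static struct platform_driver my_scsi_driver = {
	.remove	= __exit_p(my_scsi_remove),
	.driver	= {
		.name	= "my_scsi",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver_probe(my_scsi_driver, my_scsi_probe);
MODULE_LICENSE("GPL");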
+ */ + if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) && + m68k_num_memory > 1) { + atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); + if (!atari_dma_buffer) { + pr_err(PFX "can't allocate ST-RAM double buffer\n"); + return -ENOMEM; + } + atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); + atari_dma_orig_addr = 0; + } +#endif + + instance = scsi_host_alloc(&atari_scsi_template, + sizeof(struct NCR5380_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + atari_scsi_host = instance; + +#ifdef CONFIG_ATARI_SCSI_RESET_BOOT + atari_scsi_reset_boot(); +#endif + + instance->irq = irq->start; + + host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP; + +#ifdef SUPPORT_TAGS + host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; +#endif + + NCR5380_init(instance, host_flags); + + if (IS_A_TT()) { + error = request_irq(instance->irq, scsi_tt_intr, 0, + "NCR5380", instance); + if (error) { + pr_err(PFX "request irq %d failed, aborting\n", + instance->irq); + goto fail_irq; + } + tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */ +#ifdef REAL_DMA + tt_scsi_dma.dma_ctrl = 0; + atari_dma_residual = 0; + + /* While the read overruns (described by Drew Eckhardt in + * NCR5380.c) never happened on TTs, they do in fact on the + * Medusa (This was the cause why SCSI didn't work right for + * so long there.) Since handling the overruns slows down + * a bit, I turned the #ifdef's into a runtime condition. + * + * In principle it should be sufficient to do max. 1 byte with + * PIO, but there is another problem on the Medusa with the DMA + * rest data register. So read_overruns is currently set + * to 4 to avoid having transfers that aren't a multiple of 4. + * If the rest data bug is fixed, this can be lowered to 1. + */ + if (MACH_IS_MEDUSA) { + struct NCR5380_hostdata *hostdata = + shost_priv(instance); + + hostdata->read_overruns = 4; + } +#endif + } else { + /* Nothing to do for the interrupt: the ST-DMA is initialized + * already. + */ +#ifdef REAL_DMA + atari_dma_residual = 0; + atari_dma_active = 0; + atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 
0x00000000 + : 0xff000000); +#endif + } + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + if (IS_A_TT()) + free_irq(instance->irq, instance); +fail_irq: + NCR5380_exit(instance); + scsi_host_put(instance); +fail_alloc: + if (atari_dma_buffer) + atari_stram_free(atari_dma_buffer); + return error; +} + +static int __exit atari_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + + scsi_remove_host(instance); + if (IS_A_TT()) + free_irq(instance->irq, instance); + NCR5380_exit(instance); + scsi_host_put(instance); + if (atari_dma_buffer) + atari_stram_free(atari_dma_buffer); + return 0; +} + +static struct platform_driver atari_scsi_driver = { + .remove = __exit_p(atari_scsi_remove), + .driver = { + .name = DRV_MODULE_NAME, + .owner = THIS_MODULE, + }, +}; -#include "scsi_module.c" +module_platform_driver_probe(atari_scsi_driver, atari_scsi_probe); +MODULE_ALIAS("platform:" DRV_MODULE_NAME); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h deleted file mode 100644 index 3299d91d7336..000000000000 --- a/drivers/scsi/atari_scsi.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * atari_scsi.h -- Header file for the Atari native SCSI driver - * - * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> - * - * (Loosely based on the work of Robert De Vries' team) - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. - * - */ - - -#ifndef ATARI_SCSI_H -#define ATARI_SCSI_H - -/* (I_HAVE_OVERRUNS stuff removed) */ - -#ifndef ASM -/* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher - * values should work, too; try it! (but cmd_per_lun costs memory!) */ - -/* But there seems to be a bug somewhere that requires CAN_QUEUE to be - * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts seen since - * changed CMD_PER_LUN... */ - -/* Note: The Falcon currently uses 8/1 setting due to unsolved problems with - * cmd_per_lun != 1 */ - -#define ATARI_TT_CAN_QUEUE 16 -#define ATARI_TT_CMD_PER_LUN 8 -#define ATARI_TT_SG_TABLESIZE SG_ALL - -#define ATARI_FALCON_CAN_QUEUE 8 -#define ATARI_FALCON_CMD_PER_LUN 1 -#define ATARI_FALCON_SG_TABLESIZE SG_NONE - -#define DEFAULT_USE_TAGGED_QUEUING 0 - - -#define NCR5380_implementation_fields /* none */ - -#define NCR5380_read(reg) atari_scsi_reg_read( reg ) -#define NCR5380_write(reg, value) atari_scsi_reg_write( reg, value ) - -#define NCR5380_intr atari_scsi_intr -#define NCR5380_queue_command atari_scsi_queue_command -#define NCR5380_abort atari_scsi_abort -#define NCR5380_show_info atari_scsi_show_info -#define NCR5380_dma_read_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 0) -#define NCR5380_dma_write_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 1) -#define NCR5380_dma_residual(inst) atari_scsi_dma_residual( inst ) -#define NCR5380_dma_xfer_len(i,cmd,phase) \ - atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 
0 : 1) - -#endif /* ndef ASM */ -#endif /* ATARI_SCSI_H */ - - diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 30d74a06b993..f3193406776c 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -556,7 +556,7 @@ static struct scsi_host_template beiscsi_sht = { .name = "Emulex 10Gbe open-iscsi Initiator Driver", .proc_name = DRV_NAME, .queuecommand = iscsi_queuecommand, - .change_queue_depth = iscsi_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .slave_configure = beiscsi_slave_configure, .target_alloc = iscsi_target_alloc, .eh_abort_handler = beiscsi_eh_abort, @@ -570,7 +570,7 @@ static struct scsi_host_template beiscsi_sht = { .cmd_per_lun = BEISCSI_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, - + .track_queue_depth = 1, }; static struct scsi_transport_template *beiscsi_scsi_transport; diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 8e83d0474fe7..74a307c0a240 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -260,18 +260,9 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = kzalloc(nbytes, GFP_KERNEL); - - if (!kern_buf) { - printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n", - bfad->inst_no); - return -ENOMEM; - } - - if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { - kfree(kern_buf); - return -ENOMEM; - } + kern_buf = memdup_user(buf, nbytes); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &len); if (rc < 2) { @@ -336,18 +327,9 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, unsigned long flags; void *kern_buf; - kern_buf = kzalloc(nbytes, GFP_KERNEL); - - if (!kern_buf) { - printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n", - bfad->inst_no); - return -ENOMEM; - } - - if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { - kfree(kern_buf); - return -ENOMEM; - } + kern_buf = memdup_user(buf, nbytes); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); rc = sscanf(kern_buf, "%x:%x", &addr, &val); if (rc < 2) { diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index f067332bf763..7223b0006740 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -776,11 +776,7 @@ bfad_thread_workq(struct bfad_s *bfad) static int bfad_im_slave_configure(struct scsi_device *sdev) { - if (sdev->tagged_supported) - scsi_activate_tcq(sdev, bfa_lun_queue_depth); - else - scsi_deactivate_tcq(sdev, bfa_lun_queue_depth); - + scsi_change_queue_depth(sdev, bfa_lun_queue_depth); return 0; } @@ -804,6 +800,7 @@ struct scsi_host_template bfad_im_scsi_host_template = { .shost_attrs = bfad_im_host_attrs, .max_sectors = BFAD_MAX_SECTORS, .vendor_id = BFA_PCI_VENDOR_ID_BROCADE, + .use_blk_tags = 1, }; struct scsi_host_template bfad_im_vport_template = { @@ -825,6 +822,7 @@ struct scsi_host_template bfad_im_vport_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = bfad_im_vport_attrs, .max_sectors = BFAD_MAX_SECTORS, + .use_blk_tags = 1, }; bfa_status_t @@ -868,14 +866,8 @@ bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) if (bfa_lun_queue_depth > tmp_sdev->queue_depth) { if (tmp_sdev->id != sdev->id) continue; - if (tmp_sdev->ordered_tags) - scsi_adjust_queue_depth(tmp_sdev, - MSG_ORDERED_TAG, - tmp_sdev->queue_depth + 1); - else - scsi_adjust_queue_depth(tmp_sdev, - 
MSG_SIMPLE_TAG, - tmp_sdev->queue_depth + 1); + scsi_change_queue_depth(tmp_sdev, + tmp_sdev->queue_depth + 1); itnim->last_ramp_up_time = jiffies; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 72533c58c1f3..e861f286b42e 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -1088,7 +1088,7 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); - if (IS_ERR(vn_port)) { + if (!vn_port) { printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", netdev->name); return -EIO; @@ -2202,6 +2202,7 @@ static int _bnx2fc_create(struct net_device *netdev, interface = bnx2fc_interface_create(hba, netdev, fip_mode); if (!interface) { printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); + rc = -ENOMEM; goto ifput_err; } @@ -2790,13 +2791,15 @@ static struct scsi_host_template bnx2fc_shost_template = { .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ .eh_host_reset_handler = fc_eh_host_reset, .slave_alloc = fc_slave_alloc, - .change_queue_depth = fc_change_queue_depth, - .change_queue_type = fc_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, .this_id = -1, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, .max_sectors = 1024, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static struct libfc_function_template bnx2fc_libfc_fcn_templ = { diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 5b99844ef6bf..4b56858c1df2 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1725,7 +1725,6 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, struct fcp_cmnd *fcp_cmnd) { struct scsi_cmnd *sc_cmd = io_req->sc_cmd; - char tag[2]; memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); @@ -1739,21 +1738,10 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; fcp_cmnd->fc_flags = io_req->io_req_flags; - if (scsi_populate_tag_msg(sc_cmd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ; - break; - case ORDERED_QUEUE_TAG: - fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED; - break; - default: - fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; - break; - } - } else { + if (sc_cmd->flags & SCMD_TAGGED) + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + else fcp_cmnd->fc_pri_ta = 0; - } } static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 7a36388822aa..e53078d03309 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -2259,7 +2259,7 @@ static struct scsi_host_template bnx2i_host_template = { .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, - .change_queue_depth = iscsi_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .target_alloc = iscsi_target_alloc, .can_queue = 2048, .max_sectors = 127, @@ -2268,6 +2268,7 @@ static struct scsi_host_template bnx2i_host_template = { .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, .shost_attrs = bnx2i_dev_attributes, + .track_queue_depth = 1, }; struct iscsi_transport bnx2i_iscsi_transport = { diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index ef5ae0d03616..6bac8a746ee2 100644 --- a/drivers/scsi/ch.c +++ 
b/drivers/scsi/ch.c @@ -85,8 +85,7 @@ static const char * vendor_labels[CH_TYPES-4] = { // module_param_string_array(vendor_labels, NULL, 0444); #define ch_printk(prefix, ch, fmt, a...) \ - sdev_printk(prefix, (ch)->device, "[%s] " fmt, \ - (ch)->name, ##a) + sdev_prefix_printk(prefix, (ch)->device, (ch)->name, fmt, ##a) #define DPRINTK(fmt, arg...) \ do { \ @@ -183,7 +182,7 @@ static int ch_find_errno(struct scsi_sense_hdr *sshdr) } static int -ch_do_scsi(scsi_changer *ch, unsigned char *cmd, +ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, void *buffer, unsigned buflength, enum dma_data_direction direction) { @@ -197,7 +196,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, errno = 0; if (debug) { DPRINTK("command: "); - __scsi_print_command(cmd); + __scsi_print_command(cmd, cmd_len); } result = scsi_execute_req(ch->device, cmd, direction, buffer, @@ -207,7 +206,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, DPRINTK("result: 0x%x\n",result); if (driver_byte(result) & DRIVER_SENSE) { if (debug) - scsi_print_sense_hdr(ch->name, &sshdr); + scsi_print_sense_hdr(ch->device, ch->name, &sshdr); errno = ch_find_errno(&sshdr); switch(sshdr.sense_key) { @@ -258,7 +257,8 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data) cmd[3] = elem & 0xff; cmd[5] = 1; cmd[9] = 255; - if (0 == (result = ch_do_scsi(ch, cmd, buffer, 256, DMA_FROM_DEVICE))) { + if (0 == (result = ch_do_scsi(ch, cmd, 12, + buffer, 256, DMA_FROM_DEVICE))) { if (((buffer[16] << 8) | buffer[17]) != elem) { DPRINTK("asked for element 0x%02x, got 0x%02x\n", elem,(buffer[16] << 8) | buffer[17]); @@ -288,7 +288,7 @@ ch_init_elem(scsi_changer *ch) memset(cmd,0,sizeof(cmd)); cmd[0] = INITIALIZE_ELEMENT_STATUS; cmd[1] = (ch->device->lun & 0x7) << 5; - err = ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); + err = ch_do_scsi(ch, cmd, 6, NULL, 0, DMA_NONE); VPRINTK(KERN_INFO, "... finished\n"); return err; } @@ -310,10 +310,10 @@ ch_readconfig(scsi_changer *ch) cmd[1] = (ch->device->lun & 0x7) << 5; cmd[2] = 0x1d; cmd[4] = 255; - result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); + result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE); if (0 != result) { cmd[1] |= (1<<3); - result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE); + result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE); } if (0 == result) { ch->firsts[CHET_MT] = @@ -438,7 +438,7 @@ ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate) cmd[4] = (elem >> 8) & 0xff; cmd[5] = elem & 0xff; cmd[8] = rotate ? 1 : 0; - return ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE); + return ch_do_scsi(ch, cmd, 10, NULL, 0, DMA_NONE); } static int @@ -459,7 +459,7 @@ ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate) cmd[6] = (dest >> 8) & 0xff; cmd[7] = dest & 0xff; cmd[10] = rotate ? 1 : 0; - return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE); + return ch_do_scsi(ch, cmd, 12, NULL,0, DMA_NONE); } static int @@ -485,7 +485,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src, cmd[9] = dest2 & 0xff; cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 
2 : 0); - return ch_do_scsi(ch, cmd, NULL,0, DMA_NONE); + return ch_do_scsi(ch, cmd, 12, NULL, 0, DMA_NONE); } static void @@ -535,7 +535,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem, memcpy(buffer,tag,32); ch_check_voltag(buffer); - result = ch_do_scsi(ch, cmd, buffer, 256, DMA_TO_DEVICE); + result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE); kfree(buffer); return result; } @@ -616,6 +616,11 @@ static long ch_ioctl(struct file *file, int retval; void __user *argp = (void __user *)arg; + retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd, + file->f_flags & O_NDELAY); + if (retval) + return retval; + switch (cmd) { case CHIOGPARAMS: { @@ -766,7 +771,8 @@ static long ch_ioctl(struct file *file, ch_cmd[5] = 1; ch_cmd[9] = 255; - result = ch_do_scsi(ch, ch_cmd, buffer, 256, DMA_FROM_DEVICE); + result = ch_do_scsi(ch, ch_cmd, 12, + buffer, 256, DMA_FROM_DEVICE); if (!result) { cge.cge_status = buffer[18]; cge.cge_flags = 0; @@ -966,9 +972,9 @@ static int ch_remove(struct device *dev) } static struct scsi_driver ch_template = { - .owner = THIS_MODULE, .gendrv = { .name = "ch", + .owner = THIS_MODULE, .probe = ch_probe, .remove = ch_remove, }, diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index d35a5d6c8d7c..e2068a2621c4 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -21,15 +21,21 @@ /* Commands with service actions that change the command name */ -#define SERVICE_ACTION_IN_12 0xab -#define SERVICE_ACTION_OUT_12 0xa9 -#define SERVICE_ACTION_BIDIRECTIONAL 0x9d -#define SERVICE_ACTION_IN_16 0x9e -#define SERVICE_ACTION_OUT_16 0x9f #define THIRD_PARTY_COPY_OUT 0x83 #define THIRD_PARTY_COPY_IN 0x84 +#define VENDOR_SPECIFIC_CDB 0xc0 +struct sa_name_list { + int opcode; + const struct value_name_pair *arr; + int arr_sz; +}; + +struct value_name_pair { + int value; + const char * name; +}; #ifdef CONFIG_SCSI_CONSTANTS static const char * cdb_byte0_names[] = { @@ -102,11 +108,6 @@ static const char * cdb_byte0_names[] = { "Volume set (out), Send DVD structure", }; -struct value_name_pair { - int value; - const char * name; -}; - static const struct value_name_pair maint_in_arr[] = { {0x5, "Report identifying information"}, {0xa, "Report target port groups"}, @@ -244,170 +245,119 @@ static const struct value_name_pair variable_length_arr[] = { }; #define VARIABLE_LENGTH_SZ ARRAY_SIZE(variable_length_arr) -static const char * get_sa_name(const struct value_name_pair * arr, - int arr_sz, int service_action) -{ - int k; +static struct sa_name_list sa_names_arr[] = { + {VARIABLE_LENGTH_CMD, variable_length_arr, VARIABLE_LENGTH_SZ}, + {MAINTENANCE_IN, maint_in_arr, MAINT_IN_SZ}, + {MAINTENANCE_OUT, maint_out_arr, MAINT_OUT_SZ}, + {PERSISTENT_RESERVE_IN, pr_in_arr, PR_IN_SZ}, + {PERSISTENT_RESERVE_OUT, pr_out_arr, PR_OUT_SZ}, + {SERVICE_ACTION_IN_12, serv_in12_arr, SERV_IN12_SZ}, + {SERVICE_ACTION_OUT_12, serv_out12_arr, SERV_OUT12_SZ}, + {SERVICE_ACTION_BIDIRECTIONAL, serv_bidi_arr, SERV_BIDI_SZ}, + {SERVICE_ACTION_IN_16, serv_in16_arr, SERV_IN16_SZ}, + {SERVICE_ACTION_OUT_16, serv_out16_arr, SERV_OUT16_SZ}, + {THIRD_PARTY_COPY_IN, tpc_in_arr, TPC_IN_SZ}, + {THIRD_PARTY_COPY_OUT, tpc_out_arr, TPC_OUT_SZ}, + {0, NULL, 0}, +}; - for (k = 0; k < arr_sz; ++k, ++arr) { - if (service_action == arr->value) - break; - } - return (k < arr_sz) ? 
arr->name : NULL; -} +#else /* ifndef CONFIG_SCSI_CONSTANTS */ +static const char *cdb_byte0_names[0]; + +static struct sa_name_list sa_names_arr[] = { + {VARIABLE_LENGTH_CMD, NULL, 0}, + {MAINTENANCE_IN, NULL, 0}, + {MAINTENANCE_OUT, NULL, 0}, + {PERSISTENT_RESERVE_IN, NULL, 0}, + {PERSISTENT_RESERVE_OUT, NULL, 0}, + {SERVICE_ACTION_IN_12, NULL, 0}, + {SERVICE_ACTION_OUT_12, NULL, 0}, + {SERVICE_ACTION_BIDIRECTIONAL, NULL, 0}, + {SERVICE_ACTION_IN_16, NULL, 0}, + {SERVICE_ACTION_OUT_16, NULL, 0}, + {THIRD_PARTY_COPY_IN, NULL, 0}, + {THIRD_PARTY_COPY_OUT, NULL, 0}, + {0, NULL, 0}, +}; +#endif /* CONFIG_SCSI_CONSTANTS */ -/* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. */ -static void print_opcode_name(unsigned char * cdbp, int cdb_len) +static bool scsi_opcode_sa_name(int opcode, int service_action, + const char **cdb_name, const char **sa_name) { - int sa, len, cdb0; - int fin_name = 0; - const char * name; + struct sa_name_list *sa_name_ptr; + const struct value_name_pair *arr = NULL; + int arr_sz, k; - cdb0 = cdbp[0]; - switch(cdb0) { - case VARIABLE_LENGTH_CMD: - len = scsi_varlen_cdb_length(cdbp); - if (len < 10) { - printk("short variable length command, " - "len=%d ext_len=%d", len, cdb_len); - break; - } - sa = (cdbp[8] << 8) + cdbp[9]; - name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, - sa); - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + *cdb_name = NULL; + if (opcode >= VENDOR_SPECIFIC_CDB) + return false; - if ((cdb_len > 0) && (len != cdb_len)) - printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len); + if (opcode < ARRAY_SIZE(cdb_byte0_names)) + *cdb_name = cdb_byte0_names[opcode]; - break; - case MAINTENANCE_IN: - sa = cdbp[1] & 0x1f; - name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); - fin_name = 1; - break; - case MAINTENANCE_OUT: - sa = cdbp[1] & 0x1f; - name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa); - fin_name = 1; - break; - case PERSISTENT_RESERVE_IN: - sa = cdbp[1] & 0x1f; - name = get_sa_name(pr_in_arr, PR_IN_SZ, sa); - fin_name = 1; - break; - case PERSISTENT_RESERVE_OUT: - sa = cdbp[1] & 0x1f; - name = get_sa_name(pr_out_arr, PR_OUT_SZ, sa); - fin_name = 1; - break; - case SERVICE_ACTION_IN_12: - sa = cdbp[1] & 0x1f; - name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa); - fin_name = 1; - break; - case SERVICE_ACTION_OUT_12: - sa = cdbp[1] & 0x1f; - name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa); - fin_name = 1; - break; - case SERVICE_ACTION_BIDIRECTIONAL: - sa = cdbp[1] & 0x1f; - name = get_sa_name(serv_bidi_arr, SERV_BIDI_SZ, sa); - fin_name = 1; - break; - case SERVICE_ACTION_IN_16: - sa = cdbp[1] & 0x1f; - name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa); - fin_name = 1; - break; - case SERVICE_ACTION_OUT_16: - sa = cdbp[1] & 0x1f; - name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa); - fin_name = 1; - break; - case THIRD_PARTY_COPY_IN: - sa = cdbp[1] & 0x1f; - name = get_sa_name(tpc_in_arr, TPC_IN_SZ, sa); - fin_name = 1; - break; - case THIRD_PARTY_COPY_OUT: - sa = cdbp[1] & 0x1f; - name = get_sa_name(tpc_out_arr, TPC_OUT_SZ, sa); - fin_name = 1; - break; - default: - if (cdb0 < 0xc0) { - name = cdb_byte0_names[cdb0]; - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x (reserved)", cdb0); - } else - printk("cdb[0]=0x%x (vendor)", cdb0); - break; + for (sa_name_ptr = sa_names_arr; sa_name_ptr->arr; ++sa_name_ptr) { + if (sa_name_ptr->opcode == opcode) { + arr = sa_name_ptr->arr; + arr_sz = sa_name_ptr->arr_sz; + break; + } } - if (fin_name) { - if (name) - 
printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + if (!arr) + return false; + + for (k = 0; k < arr_sz; ++k, ++arr) { + if (service_action == arr->value) + break; } -} + if (k < arr_sz) + *sa_name = arr->name; -#else /* ifndef CONFIG_SCSI_CONSTANTS */ + return true; +} -static void print_opcode_name(unsigned char * cdbp, int cdb_len) +static void print_opcode_name(const unsigned char *cdbp, size_t cdb_len) { - int sa, len, cdb0; + int sa, cdb0; + const char *cdb_name = NULL, *sa_name = NULL; cdb0 = cdbp[0]; - switch(cdb0) { - case VARIABLE_LENGTH_CMD: - len = scsi_varlen_cdb_length(cdbp); - if (len < 10) { - printk("short opcode=0x%x command, len=%d " - "ext_len=%d", cdb0, len, cdb_len); - break; + if (cdb0 == VARIABLE_LENGTH_CMD) { + if (cdb_len < 10) { + printk("short variable length command, len=%zu", + cdb_len); + return; } sa = (cdbp[8] << 8) + cdbp[9]; - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); - if (len != cdb_len) - printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len); - break; - case MAINTENANCE_IN: - case MAINTENANCE_OUT: - case PERSISTENT_RESERVE_IN: - case PERSISTENT_RESERVE_OUT: - case SERVICE_ACTION_IN_12: - case SERVICE_ACTION_OUT_12: - case SERVICE_ACTION_BIDIRECTIONAL: - case SERVICE_ACTION_IN_16: - case SERVICE_ACTION_OUT_16: - case THIRD_PARTY_COPY_IN: - case THIRD_PARTY_COPY_OUT: + } else sa = cdbp[1] & 0x1f; - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); - break; - default: - if (cdb0 < 0xc0) + + if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) { + if (cdb_name) + printk("%s", cdb_name); + else if (cdb0 >= VENDOR_SPECIFIC_CDB) + printk("cdb[0]=0x%x (vendor)", cdb0); + else if (cdb0 >= 0x60 && cdb0 < 0x7e) + printk("cdb[0]=0x%x (reserved)", cdb0); + else printk("cdb[0]=0x%x", cdb0); + } else { + if (sa_name) + printk("%s", sa_name); + else if (cdb_name) + printk("%s, sa=0x%x", cdb_name, sa); else - printk("cdb[0]=0x%x (vendor)", cdb0); - break; + printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); } } -#endif -void __scsi_print_command(unsigned char *cdb) +void __scsi_print_command(const unsigned char *cdb, size_t cdb_len) { int k, len; - print_opcode_name(cdb, 0); + print_opcode_name(cdb, cdb_len); len = scsi_command_size(cdb); + if (cdb_len < len) + len = cdb_len; /* print out all bytes in cdb */ for (k = 0; k < len; ++k) printk(" %02x", cdb[k]); @@ -433,41 +383,6 @@ void scsi_print_command(struct scsi_cmnd *cmd) } EXPORT_SYMBOL(scsi_print_command); -/** - * scsi_print_status - print scsi status description - * @scsi_status: scsi status value - * - * If the status is recognized, the description is printed. - * Otherwise "Unknown status" is output. No trailing space. - * If CONFIG_SCSI_CONSTANTS is not set, then print status in hex - * (e.g. "0x2" for Check Condition). 
- **/ -void -scsi_print_status(unsigned char scsi_status) { -#ifdef CONFIG_SCSI_CONSTANTS - const char * ccp; - - switch (scsi_status) { - case 0: ccp = "Good"; break; - case 0x2: ccp = "Check Condition"; break; - case 0x4: ccp = "Condition Met"; break; - case 0x8: ccp = "Busy"; break; - case 0x10: ccp = "Intermediate"; break; - case 0x14: ccp = "Intermediate-Condition Met"; break; - case 0x18: ccp = "Reservation Conflict"; break; - case 0x22: ccp = "Command Terminated"; break; /* obsolete */ - case 0x28: ccp = "Task set Full"; break; /* was: Queue Full */ - case 0x30: ccp = "ACA Active"; break; - case 0x40: ccp = "Task Aborted"; break; - default: ccp = "Unknown status"; - } - printk(KERN_INFO "%s", ccp); -#else - printk(KERN_INFO "0x%0x", scsi_status); -#endif -} -EXPORT_SYMBOL(scsi_print_status); - #ifdef CONFIG_SCSI_CONSTANTS struct error_info { @@ -1292,18 +1207,19 @@ static const struct error_info additional[] = struct error_info2 { unsigned char code1, code2_min, code2_max; + const char * str; const char * fmt; }; static const struct error_info2 additional2[] = { - {0x40, 0x00, 0x7f, "Ram failure (%x)"}, - {0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"}, - {0x41, 0x00, 0xff, "Data path failure (%x)"}, - {0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"}, - {0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"}, - {0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"}, - {0, 0, 0, NULL} + {0x40, 0x00, 0x7f, "Ram failure", ""}, + {0x40, 0x80, 0xff, "Diagnostic failure on component", ""}, + {0x41, 0x00, 0xff, "Data path failure", ""}, + {0x42, 0x00, 0xff, "Power-on or self-test failure", ""}, + {0x4D, 0x00, 0xff, "Tagged overlapped commands", "task tag "}, + {0x70, 0x00, 0xff, "Decompression exception", "short algorithm id of "}, + {0, 0, 0, NULL, NULL} }; /* description of the sense key values */ @@ -1349,69 +1265,79 @@ EXPORT_SYMBOL(scsi_sense_key_string); * This string may contain a "%x" and should be printed with ascq as arg. */ const char * -scsi_extd_sense_format(unsigned char asc, unsigned char ascq) { +scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) +{ #ifdef CONFIG_SCSI_CONSTANTS int i; unsigned short code = ((asc << 8) | ascq); + *fmt = NULL; for (i = 0; additional[i].text; i++) if (additional[i].code12 == code) return additional[i].text; for (i = 0; additional2[i].fmt; i++) { if (additional2[i].code1 == asc && ascq >= additional2[i].code2_min && - ascq <= additional2[i].code2_max) - return additional2[i].fmt; + ascq <= additional2[i].code2_max) { + *fmt = additional2[i].fmt; + return additional2[i].str; + } } +#else + *fmt = NULL; #endif return NULL; } EXPORT_SYMBOL(scsi_extd_sense_format); void -scsi_show_extd_sense(unsigned char asc, unsigned char ascq) +scsi_show_extd_sense(const struct scsi_device *sdev, const char *name, + unsigned char asc, unsigned char ascq) { - const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq); - - if (extd_sense_fmt) { - if (strstr(extd_sense_fmt, "%x")) { - printk("Add. Sense: "); - printk(extd_sense_fmt, ascq); - } else - printk("Add. Sense: %s", extd_sense_fmt); - } else { - if (asc >= 0x80) - printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc, - ascq); - if (ascq >= 0x80) - printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc, - ascq); + const char *extd_sense_fmt = NULL; + const char *extd_sense_str = scsi_extd_sense_format(asc, ascq, + &extd_sense_fmt); + + if (extd_sense_str) { + if (extd_sense_fmt) + sdev_prefix_printk(KERN_INFO, sdev, name, + "Add. 
Sense: %s (%s%x)", + extd_sense_str, extd_sense_fmt, + ascq); else - printk("ASC=0x%x ASCQ=0x%x", asc, ascq); - } + sdev_prefix_printk(KERN_INFO, sdev, name, + "Add. Sense: %s", extd_sense_str); - printk("\n"); + } else { + sdev_prefix_printk(KERN_INFO, sdev, name, + "%sASC=0x%x %sASCQ=0x%x\n", + asc >= 0x80 ? "<<vendor>> " : "", asc, + ascq >= 0x80 ? "<<vendor>> " : "", ascq); + } } EXPORT_SYMBOL(scsi_show_extd_sense); void -scsi_show_sense_hdr(struct scsi_sense_hdr *sshdr) +scsi_show_sense_hdr(const struct scsi_device *sdev, const char *name, + const struct scsi_sense_hdr *sshdr) { const char *sense_txt; sense_txt = scsi_sense_key_string(sshdr->sense_key); if (sense_txt) - printk("Sense Key : %s ", sense_txt); + sdev_prefix_printk(KERN_INFO, sdev, name, + "Sense Key : %s [%s]%s\n", sense_txt, + scsi_sense_is_deferred(sshdr) ? + "deferred" : "current", + sshdr->response_code >= 0x72 ? + " [descriptor]" : ""); else - printk("Sense Key : 0x%x ", sshdr->sense_key); - - printk("%s", scsi_sense_is_deferred(sshdr) ? "[deferred] " : - "[current] "); - - if (sshdr->response_code >= 0x72) - printk("[descriptor]"); - - printk("\n"); + sdev_prefix_printk(KERN_INFO, sdev, name, + "Sense Key : 0x%x [%s]%s", sshdr->sense_key, + scsi_sense_is_deferred(sshdr) ? + "deferred" : "current", + sshdr->response_code >= 0x72 ? + " [descriptor]" : ""); } EXPORT_SYMBOL(scsi_show_sense_hdr); @@ -1419,141 +1345,55 @@ EXPORT_SYMBOL(scsi_show_sense_hdr); * Print normalized SCSI sense header with a prefix. */ void -scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr) +scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name, + const struct scsi_sense_hdr *sshdr) { - printk(KERN_INFO "%s: ", name); - scsi_show_sense_hdr(sshdr); - printk(KERN_INFO "%s: ", name); - scsi_show_extd_sense(sshdr->asc, sshdr->ascq); + scsi_show_sense_hdr(sdev, name, sshdr); + scsi_show_extd_sense(sdev, name, sshdr->asc, sshdr->ascq); } EXPORT_SYMBOL(scsi_print_sense_hdr); -/* - * Print normalized SCSI sense header with device information and a prefix. - */ -void -scsi_cmd_print_sense_hdr(struct scsi_cmnd *scmd, const char *desc, - struct scsi_sense_hdr *sshdr) -{ - scmd_printk(KERN_INFO, scmd, "%s: ", desc); - scsi_show_sense_hdr(sshdr); - scmd_printk(KERN_INFO, scmd, "%s: ", desc); - scsi_show_extd_sense(sshdr->asc, sshdr->ascq); -} -EXPORT_SYMBOL(scsi_cmd_print_sense_hdr); - static void -scsi_decode_sense_buffer(const unsigned char *sense_buffer, int sense_len, - struct scsi_sense_hdr *sshdr) +scsi_dump_sense_buffer(const unsigned char *sense_buffer, int sense_len) { - int k, num, res; - - res = scsi_normalize_sense(sense_buffer, sense_len, sshdr); - if (0 == res) { - /* this may be SCSI-1 sense data */ - num = (sense_len < 32) ? sense_len : 32; - printk("Unrecognized sense data (in hex):"); - for (k = 0; k < num; ++k) { - if (0 == (k % 16)) { - printk("\n"); - printk(KERN_INFO " "); - } - printk("%02x ", sense_buffer[k]); + int k, num; + + num = (sense_len < 32) ? 
sense_len : 32; + printk("Unrecognized sense data (in hex):"); + for (k = 0; k < num; ++k) { + if (0 == (k % 16)) { + printk("\n"); + printk(KERN_INFO " "); } - printk("\n"); - return; + printk("%02x ", sense_buffer[k]); } -} - -static void -scsi_decode_sense_extras(const unsigned char *sense_buffer, int sense_len, - struct scsi_sense_hdr *sshdr) -{ - int k, num, res; - - if (sshdr->response_code < 0x72) - { - /* only decode extras for "fixed" format now */ - char buff[80]; - int blen, fixed_valid; - unsigned int info; - - fixed_valid = sense_buffer[0] & 0x80; - info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) | - (sense_buffer[5] << 8) | sense_buffer[6]); - res = 0; - memset(buff, 0, sizeof(buff)); - blen = sizeof(buff) - 1; - if (fixed_valid) - res += snprintf(buff + res, blen - res, - "Info fld=0x%x", info); - if (sense_buffer[2] & 0x80) { - /* current command has read a filemark */ - if (res > 0) - res += snprintf(buff + res, blen - res, ", "); - res += snprintf(buff + res, blen - res, "FMK"); - } - if (sense_buffer[2] & 0x40) { - /* end-of-medium condition exists */ - if (res > 0) - res += snprintf(buff + res, blen - res, ", "); - res += snprintf(buff + res, blen - res, "EOM"); - } - if (sense_buffer[2] & 0x20) { - /* incorrect block length requested */ - if (res > 0) - res += snprintf(buff + res, blen - res, ", "); - res += snprintf(buff + res, blen - res, "ILI"); - } - if (res > 0) - printk("%s\n", buff); - } else if (sshdr->additional_length > 0) { - /* descriptor format with sense descriptors */ - num = 8 + sshdr->additional_length; - num = (sense_len < num) ? sense_len : num; - printk("Descriptor sense data with sense descriptors " - "(in hex):"); - for (k = 0; k < num; ++k) { - if (0 == (k % 16)) { - printk("\n"); - printk(KERN_INFO " "); - } - printk("%02x ", sense_buffer[k]); - } - - printk("\n"); - } - + printk("\n"); + return; } /* Normalize and print sense buffer with name prefix */ -void __scsi_print_sense(const char *name, const unsigned char *sense_buffer, - int sense_len) +void __scsi_print_sense(const struct scsi_device *sdev, const char *name, + const unsigned char *sense_buffer, int sense_len) { struct scsi_sense_hdr sshdr; - printk(KERN_INFO "%s: ", name); - scsi_decode_sense_buffer(sense_buffer, sense_len, &sshdr); - scsi_show_sense_hdr(&sshdr); - scsi_decode_sense_extras(sense_buffer, sense_len, &sshdr); - printk(KERN_INFO "%s: ", name); - scsi_show_extd_sense(sshdr.asc, sshdr.ascq); + if (!scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) { + scsi_dump_sense_buffer(sense_buffer, sense_len); + return; + } + scsi_show_sense_hdr(sdev, name, &sshdr); + scsi_show_extd_sense(sdev, name, sshdr.asc, sshdr.ascq); } EXPORT_SYMBOL(__scsi_print_sense); /* Normalize and print sense buffer in SCSI command */ -void scsi_print_sense(char *name, struct scsi_cmnd *cmd) +void scsi_print_sense(const struct scsi_cmnd *cmd) { - struct scsi_sense_hdr sshdr; + struct gendisk *disk = cmd->request->rq_disk; + const char *disk_name = disk ? 
disk->disk_name : NULL; - scmd_printk(KERN_INFO, cmd, " "); - scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, - &sshdr); - scsi_show_sense_hdr(&sshdr); - scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, - &sshdr); - scmd_printk(KERN_INFO, cmd, " "); - scsi_show_extd_sense(sshdr.asc, sshdr.ascq); + __scsi_print_sense(cmd->device, disk_name, cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE); } EXPORT_SYMBOL(scsi_print_sense); @@ -1565,38 +1405,87 @@ static const char * const hostbyte_table[]={ "DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE", "DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE", "DID_NEXUS_FAILURE" }; -#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table) static const char * const driverbyte_table[]={ "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", "DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; -#define NUM_DRIVERBYTE_STRS ARRAY_SIZE(driverbyte_table) -void scsi_show_result(int result) +#endif + +const char *scsi_hostbyte_string(int result) { + const char *hb_string = NULL; +#ifdef CONFIG_SCSI_CONSTANTS int hb = host_byte(result); - int db = driver_byte(result); - printk("Result: hostbyte=%s driverbyte=%s\n", - (hb < NUM_HOSTBYTE_STRS ? hostbyte_table[hb] : "invalid"), - (db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid")); + if (hb < ARRAY_SIZE(hostbyte_table)) + hb_string = hostbyte_table[hb]; +#endif + return hb_string; } +EXPORT_SYMBOL(scsi_hostbyte_string); -#else - -void scsi_show_result(int result) +const char *scsi_driverbyte_string(int result) { - printk("Result: hostbyte=0x%02x driverbyte=0x%02x\n", - host_byte(result), driver_byte(result)); + const char *db_string = NULL; +#ifdef CONFIG_SCSI_CONSTANTS + int db = driver_byte(result); + + if (db < ARRAY_SIZE(driverbyte_table)) + db_string = driverbyte_table[db]; +#endif + return db_string; } +EXPORT_SYMBOL(scsi_driverbyte_string); +#ifdef CONFIG_SCSI_CONSTANTS +#define scsi_mlreturn_name(result) { result, #result } +static const struct value_name_pair scsi_mlreturn_arr[] = { + scsi_mlreturn_name(NEEDS_RETRY), + scsi_mlreturn_name(SUCCESS), + scsi_mlreturn_name(FAILED), + scsi_mlreturn_name(QUEUED), + scsi_mlreturn_name(SOFT_ERROR), + scsi_mlreturn_name(ADD_TO_MLQUEUE), + scsi_mlreturn_name(TIMEOUT_ERROR), + scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED), + scsi_mlreturn_name(FAST_IO_FAIL) +}; #endif -EXPORT_SYMBOL(scsi_show_result); +const char *scsi_mlreturn_string(int result) +{ +#ifdef CONFIG_SCSI_CONSTANTS + const struct value_name_pair *arr = scsi_mlreturn_arr; + int k; -void scsi_print_result(struct scsi_cmnd *cmd) + for (k = 0; k < ARRAY_SIZE(scsi_mlreturn_arr); ++k, ++arr) { + if (result == arr->value) + return arr->name; + } +#endif + return NULL; +} +EXPORT_SYMBOL(scsi_mlreturn_string); + +void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition) { - scmd_printk(KERN_INFO, cmd, " "); - scsi_show_result(cmd->result); + const char *mlret_string = scsi_mlreturn_string(disposition); + const char *hb_string = scsi_hostbyte_string(cmd->result); + const char *db_string = scsi_driverbyte_string(cmd->result); + + if (hb_string || db_string) + scmd_printk(KERN_INFO, cmd, + "%s%s Result: hostbyte=%s driverbyte=%s", + msg ? msg : "", + mlret_string ? mlret_string : "UNKNOWN", + hb_string ? hb_string : "invalid", + db_string ? db_string : "invalid"); + else + scmd_printk(KERN_INFO, cmd, + "%s%s Result: hostbyte=0x%02x driverbyte=0x%02x", + msg ? msg : "", + mlret_string ? 
mlret_string : "UNKNOWN", + host_byte(cmd->result), driver_byte(cmd->result)); } EXPORT_SYMBOL(scsi_print_result); diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 86103c8475d8..4d0b6ce55f20 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -152,28 +152,6 @@ csio_scsi_itnexus_loss_error(uint16_t error) return 0; } -static inline void -csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq, - uint8_t oq, uint8_t sq) -{ - char stag[2]; - - if (scsi_populate_tag_msg(scmnd, stag)) { - switch (stag[0]) { - case HEAD_OF_QUEUE_TAG: - *tag = hq; - break; - case ORDERED_QUEUE_TAG: - *tag = oq; - break; - default: - *tag = sq; - break; - } - } else - *tag = 0; -} - /* * csio_scsi_fcp_cmnd - Frame the SCSI FCP command paylod. * @req: IO req structure. @@ -192,11 +170,12 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr) int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun); fcp_cmnd->fc_tm_flags = 0; fcp_cmnd->fc_cmdref = 0; - fcp_cmnd->fc_pri_ta = 0; memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); - csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta, - FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE); + if (scmnd->flags & SCMD_TAGGED) + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; + else + fcp_cmnd->fc_pri_ta = 0; fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); if (req->nsge) @@ -2262,11 +2241,7 @@ csio_slave_alloc(struct scsi_device *sdev) static int csio_slave_configure(struct scsi_device *sdev) { - if (sdev->tagged_supported) - scsi_activate_tcq(sdev, csio_lun_qdepth); - else - scsi_deactivate_tcq(sdev, csio_lun_qdepth); - + scsi_change_queue_depth(sdev, csio_lun_qdepth); return 0; } @@ -2311,6 +2286,7 @@ struct scsi_host_template csio_fcoe_shost_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = csio_fcoe_lport_attrs, .max_sectors = CSIO_MAX_SECTOR_SIZE, + .use_blk_tags = 1, }; struct scsi_host_template csio_fcoe_shost_vport_template = { @@ -2330,6 +2306,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = csio_fcoe_vport_attrs, .max_sectors = CSIO_MAX_SECTOR_SIZE, + .use_blk_tags = 1, }; /* diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 49692a1ac44a..3db4c63978c5 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -86,7 +86,7 @@ static struct scsi_host_template cxgb3i_host_template = { .proc_name = DRV_MODULE_NAME, .can_queue = CXGB3I_SCSI_HOST_QDEPTH, .queuecommand = iscsi_queuecommand, - .change_queue_depth = iscsi_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, @@ -96,6 +96,7 @@ static struct scsi_host_template cxgb3i_host_template = { .target_alloc = iscsi_target_alloc, .use_clustering = DISABLE_CLUSTERING, .this_id = -1, + .track_queue_depth = 1, }; static struct iscsi_transport cxgb3i_iscsi_transport = { diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 15081257cfc8..e6c3f55d9d36 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -89,7 +89,7 @@ static struct scsi_host_template cxgb4i_host_template = { .proc_name = DRV_MODULE_NAME, .can_queue = CXGB4I_SCSI_HOST_QDEPTH, .queuecommand = iscsi_queuecommand, - .change_queue_depth = iscsi_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .sg_tablesize = SG_ALL, .max_sectors = 0xFFFF, .cmd_per_lun = 
ISCSI_DEF_CMD_PER_LUN, @@ -99,6 +99,7 @@ static struct scsi_host_template cxgb4i_host_template = { .target_alloc = iscsi_target_alloc, .use_clustering = DISABLE_CLUSTERING, .this_id = -1, + .track_queue_depth = 1, }; static struct iscsi_transport cxgb4i_iscsi_transport = { diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 33e422e75835..1dba62c5cf6a 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -98,27 +98,51 @@ device_handler_match(struct scsi_device_handler *scsi_dh, static int scsi_dh_handler_attach(struct scsi_device *sdev, struct scsi_device_handler *scsi_dh) { - int err = 0; + struct scsi_dh_data *d; if (sdev->scsi_dh_data) { if (sdev->scsi_dh_data->scsi_dh != scsi_dh) - err = -EBUSY; - else - kref_get(&sdev->scsi_dh_data->kref); - } else if (scsi_dh->attach) { - err = scsi_dh->attach(sdev); - if (!err) { - kref_init(&sdev->scsi_dh_data->kref); - sdev->scsi_dh_data->sdev = sdev; - } + return -EBUSY; + + kref_get(&sdev->scsi_dh_data->kref); + return 0; } - return err; + + if (!try_module_get(scsi_dh->module)) + return -EINVAL; + + d = scsi_dh->attach(sdev); + if (IS_ERR(d)) { + sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%ld)\n", + scsi_dh->name, PTR_ERR(d)); + module_put(scsi_dh->module); + return PTR_ERR(d); + } + + d->scsi_dh = scsi_dh; + kref_init(&d->kref); + d->sdev = sdev; + + spin_lock_irq(sdev->request_queue->queue_lock); + sdev->scsi_dh_data = d; + spin_unlock_irq(sdev->request_queue->queue_lock); + return 0; } static void __detach_handler (struct kref *kref) { - struct scsi_dh_data *scsi_dh_data = container_of(kref, struct scsi_dh_data, kref); - scsi_dh_data->scsi_dh->detach(scsi_dh_data->sdev); + struct scsi_dh_data *scsi_dh_data = + container_of(kref, struct scsi_dh_data, kref); + struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh; + struct scsi_device *sdev = scsi_dh_data->sdev; + + spin_lock_irq(sdev->request_queue->queue_lock); + sdev->scsi_dh_data = NULL; + spin_unlock_irq(sdev->request_queue->queue_lock); + + scsi_dh->detach(sdev); + sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name); + module_put(scsi_dh->module); } /* @@ -141,7 +165,7 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev, if (!scsi_dh) scsi_dh = sdev->scsi_dh_data->scsi_dh; - if (scsi_dh && scsi_dh->detach) + if (scsi_dh) kref_put(&sdev->scsi_dh_data->kref, __detach_handler); } @@ -330,6 +354,9 @@ int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) if (get_device_handler(scsi_dh->name)) return -EBUSY; + if (!scsi_dh->attach || !scsi_dh->detach) + return -EINVAL; + spin_lock(&list_lock); list_add(&scsi_dh->list, &scsi_dh_list); spin_unlock(&list_lock); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index fd78bdc53528..854b568b9931 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -62,6 +62,7 @@ #define ALUA_OPTIMIZE_STPG 1 struct alua_dh_data { + struct scsi_dh_data dh_data; int group_id; int rel_port; int tpgs; @@ -87,9 +88,7 @@ static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *); static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; - BUG_ON(scsi_dh_data == NULL); - return ((struct alua_dh_data *) scsi_dh_data->buf); + return container_of(sdev->scsi_dh_data, struct alua_dh_data, dh_data); } static int realloc_buffer(struct alua_dh_data 
*h, unsigned len) @@ -825,42 +824,18 @@ static bool alua_match(struct scsi_device *sdev) return (scsi_device_tpgs(sdev) != 0); } -static int alua_bus_attach(struct scsi_device *sdev); -static void alua_bus_detach(struct scsi_device *sdev); - -static struct scsi_device_handler alua_dh = { - .name = ALUA_DH_NAME, - .module = THIS_MODULE, - .attach = alua_bus_attach, - .detach = alua_bus_detach, - .prep_fn = alua_prep_fn, - .check_sense = alua_check_sense, - .activate = alua_activate, - .set_params = alua_set_params, - .match = alua_match, -}; - /* * alua_bus_attach - Attach device handler * @sdev: device to be attached to */ -static int alua_bus_attach(struct scsi_device *sdev) +static struct scsi_dh_data *alua_bus_attach(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data; struct alua_dh_data *h; - unsigned long flags; - int err = SCSI_DH_OK; - - scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) - + sizeof(*h) , GFP_KERNEL); - if (!scsi_dh_data) { - sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", - ALUA_DH_NAME); - return -ENOMEM; - } + int err; - scsi_dh_data->scsi_dh = &alua_dh; - h = (struct alua_dh_data *) scsi_dh_data->buf; + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); h->tpgs = TPGS_MODE_UNINITIALIZED; h->state = TPGS_STATE_OPTIMIZED; h->group_id = -1; @@ -870,23 +845,14 @@ static int alua_bus_attach(struct scsi_device *sdev) h->sdev = sdev; err = alua_initialize(sdev, h); - if ((err != SCSI_DH_OK) && (err != SCSI_DH_DEV_OFFLINED)) - goto failed; - - if (!try_module_get(THIS_MODULE)) + if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED) goto failed; - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); - sdev->scsi_dh_data = scsi_dh_data; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME); - - return 0; - + return &h->dh_data; failed: - kfree(scsi_dh_data); - sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME); - return -EINVAL; + kfree(h); + return ERR_PTR(-EINVAL); } /* @@ -895,23 +861,25 @@ failed: */ static void alua_bus_detach(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data; - struct alua_dh_data *h; - unsigned long flags; - - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); - scsi_dh_data = sdev->scsi_dh_data; - sdev->scsi_dh_data = NULL; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); + struct alua_dh_data *h = get_alua_data(sdev); - h = (struct alua_dh_data *) scsi_dh_data->buf; if (h->buff && h->inq != h->buff) kfree(h->buff); - kfree(scsi_dh_data); - module_put(THIS_MODULE); - sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME); + kfree(h); } +static struct scsi_device_handler alua_dh = { + .name = ALUA_DH_NAME, + .module = THIS_MODULE, + .attach = alua_bus_attach, + .detach = alua_bus_detach, + .prep_fn = alua_prep_fn, + .check_sense = alua_check_sense, + .activate = alua_activate, + .set_params = alua_set_params, + .match = alua_match, +}; + static int __init alua_init(void) { int r; diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 84765384c47c..6ed1caadbc6a 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -72,6 +72,7 @@ static const char * lun_state[] = }; struct clariion_dh_data { + struct scsi_dh_data dh_data; /* * Flags: * CLARIION_SHORT_TRESPASS @@ -116,9 +117,8 @@ struct clariion_dh_data { static inline struct clariion_dh_data *get_clariion_data(struct scsi_device 
*sdev) { - struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; - BUG_ON(scsi_dh_data == NULL); - return ((struct clariion_dh_data *) scsi_dh_data->buf); + return container_of(sdev->scsi_dh_data, struct clariion_dh_data, + dh_data); } /* @@ -622,7 +622,10 @@ done: return result; } -static const struct scsi_dh_devlist clariion_dev_list[] = { +static const struct { + char *vendor; + char *model; +} clariion_dev_list[] = { {"DGC", "RAID"}, {"DGC", "DISK"}, {"DGC", "VRAID"}, @@ -647,39 +650,14 @@ static bool clariion_match(struct scsi_device *sdev) return false; } -static int clariion_bus_attach(struct scsi_device *sdev); -static void clariion_bus_detach(struct scsi_device *sdev); - -static struct scsi_device_handler clariion_dh = { - .name = CLARIION_NAME, - .module = THIS_MODULE, - .devlist = clariion_dev_list, - .attach = clariion_bus_attach, - .detach = clariion_bus_detach, - .check_sense = clariion_check_sense, - .activate = clariion_activate, - .prep_fn = clariion_prep_fn, - .set_params = clariion_set_params, - .match = clariion_match, -}; - -static int clariion_bus_attach(struct scsi_device *sdev) +static struct scsi_dh_data *clariion_bus_attach(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data; struct clariion_dh_data *h; - unsigned long flags; int err; - scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) - + sizeof(*h) , GFP_KERNEL); - if (!scsi_dh_data) { - sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", - CLARIION_NAME); - return -ENOMEM; - } - - scsi_dh_data->scsi_dh = &clariion_dh; - h = (struct clariion_dh_data *) scsi_dh_data->buf; + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); h->lun_state = CLARIION_LUN_UNINITIALIZED; h->default_sp = CLARIION_UNBOUND_LU; h->current_sp = CLARIION_UNBOUND_LU; @@ -692,45 +670,37 @@ static int clariion_bus_attach(struct scsi_device *sdev) if (err != SCSI_DH_OK) goto failed; - if (!try_module_get(THIS_MODULE)) - goto failed; - - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); - sdev->scsi_dh_data = scsi_dh_data; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); - sdev_printk(KERN_INFO, sdev, "%s: connected to SP %c Port %d (%s, default SP %c)\n", CLARIION_NAME, h->current_sp + 'A', h->port, lun_state[h->lun_state], h->default_sp + 'A'); - - return 0; + return &h->dh_data; failed: - kfree(scsi_dh_data); - sdev_printk(KERN_ERR, sdev, "%s: not attached\n", - CLARIION_NAME); - return -EINVAL; + kfree(h); + return ERR_PTR(-EINVAL); } static void clariion_bus_detach(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data; - unsigned long flags; - - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); - scsi_dh_data = sdev->scsi_dh_data; - sdev->scsi_dh_data = NULL; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); - - sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", - CLARIION_NAME); + struct clariion_dh_data *h = get_clariion_data(sdev); - kfree(scsi_dh_data); - module_put(THIS_MODULE); + kfree(h); } +static struct scsi_device_handler clariion_dh = { + .name = CLARIION_NAME, + .module = THIS_MODULE, + .attach = clariion_bus_attach, + .detach = clariion_bus_detach, + .check_sense = clariion_check_sense, + .activate = clariion_activate, + .prep_fn = clariion_prep_fn, + .set_params = clariion_set_params, + .match = clariion_match, +}; + static int __init clariion_init(void) { int r; diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 4ee2759f5299..485d99544a15 100644 --- 
a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -38,6 +38,7 @@ #define HP_SW_PATH_PASSIVE 1 struct hp_sw_dh_data { + struct scsi_dh_data dh_data; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int path_state; int retries; @@ -51,9 +52,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *); static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; - BUG_ON(scsi_dh_data == NULL); - return ((struct hp_sw_dh_data *) scsi_dh_data->buf); + return container_of(sdev->scsi_dh_data, struct hp_sw_dh_data, dh_data); } /* @@ -312,7 +311,10 @@ static int hp_sw_activate(struct scsi_device *sdev, return 0; } -static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { +static const struct { + char *vendor; + char *model; +} hp_sw_dh_data_list[] = { {"COMPAQ", "MSA1000 VOLUME"}, {"COMPAQ", "HSV110"}, {"HP", "HSV100"}, @@ -338,37 +340,14 @@ static bool hp_sw_match(struct scsi_device *sdev) return false; } -static int hp_sw_bus_attach(struct scsi_device *sdev); -static void hp_sw_bus_detach(struct scsi_device *sdev); - -static struct scsi_device_handler hp_sw_dh = { - .name = HP_SW_NAME, - .module = THIS_MODULE, - .devlist = hp_sw_dh_data_list, - .attach = hp_sw_bus_attach, - .detach = hp_sw_bus_detach, - .activate = hp_sw_activate, - .prep_fn = hp_sw_prep_fn, - .match = hp_sw_match, -}; - -static int hp_sw_bus_attach(struct scsi_device *sdev) +static struct scsi_dh_data *hp_sw_bus_attach(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data; struct hp_sw_dh_data *h; - unsigned long flags; int ret; - scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) - + sizeof(*h) , GFP_KERNEL); - if (!scsi_dh_data) { - sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n", - HP_SW_NAME); - return 0; - } - - scsi_dh_data->scsi_dh = &hp_sw_dh; - h = (struct hp_sw_dh_data *) scsi_dh_data->buf; + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); h->path_state = HP_SW_PATH_UNINITIALIZED; h->retries = HP_SW_RETRIES; h->sdev = sdev; @@ -377,42 +356,32 @@ static int hp_sw_bus_attach(struct scsi_device *sdev) if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) goto failed; - if (!try_module_get(THIS_MODULE)) - goto failed; - - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); - sdev->scsi_dh_data = scsi_dh_data; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); - sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? 
"active":"passive"); - - return 0; - + return &h->dh_data; failed: - kfree(scsi_dh_data); - sdev_printk(KERN_ERR, sdev, "%s: not attached\n", - HP_SW_NAME); - return -EINVAL; + kfree(h); + return ERR_PTR(-EINVAL); } static void hp_sw_bus_detach( struct scsi_device *sdev ) { - struct scsi_dh_data *scsi_dh_data; - unsigned long flags; - - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); - scsi_dh_data = sdev->scsi_dh_data; - sdev->scsi_dh_data = NULL; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); - module_put(THIS_MODULE); - - sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME); + struct hp_sw_dh_data *h = get_hp_sw_data(sdev); - kfree(scsi_dh_data); + kfree(h); } +static struct scsi_device_handler hp_sw_dh = { + .name = HP_SW_NAME, + .module = THIS_MODULE, + .attach = hp_sw_bus_attach, + .detach = hp_sw_bus_detach, + .activate = hp_sw_activate, + .prep_fn = hp_sw_prep_fn, + .match = hp_sw_match, +}; + static int __init hp_sw_init(void) { return scsi_register_device_handler(&hp_sw_dh); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 1b5bc9293e37..b46ace3d4bf0 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -181,6 +181,7 @@ struct c2_inquiry { }; struct rdac_dh_data { + struct scsi_dh_data dh_data; struct rdac_controller *ctlr; #define UNINITIALIZED_LUN (1 << 8) unsigned lun; @@ -261,9 +262,7 @@ do { \ static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; - BUG_ON(scsi_dh_data == NULL); - return ((struct rdac_dh_data *) scsi_dh_data->buf); + return container_of(sdev->scsi_dh_data, struct rdac_dh_data, dh_data); } static struct request *get_rdac_req(struct scsi_device *sdev, @@ -779,7 +778,10 @@ static int rdac_check_sense(struct scsi_device *sdev, return SCSI_RETURN_NOT_HANDLED; } -static const struct scsi_dh_devlist rdac_dev_list[] = { +static const struct { + char *vendor; + char *model; +} rdac_dev_list[] = { {"IBM", "1722"}, {"IBM", "1724"}, {"IBM", "1726"}, @@ -825,40 +827,16 @@ static bool rdac_match(struct scsi_device *sdev) return false; } -static int rdac_bus_attach(struct scsi_device *sdev); -static void rdac_bus_detach(struct scsi_device *sdev); - -static struct scsi_device_handler rdac_dh = { - .name = RDAC_NAME, - .module = THIS_MODULE, - .devlist = rdac_dev_list, - .prep_fn = rdac_prep_fn, - .check_sense = rdac_check_sense, - .attach = rdac_bus_attach, - .detach = rdac_bus_detach, - .activate = rdac_activate, - .match = rdac_match, -}; - -static int rdac_bus_attach(struct scsi_device *sdev) +static struct scsi_dh_data *rdac_bus_attach(struct scsi_device *sdev) { - struct scsi_dh_data *scsi_dh_data; struct rdac_dh_data *h; - unsigned long flags; int err; char array_name[ARRAY_LABEL_LEN]; char array_id[UNIQUE_ID_LEN]; - scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) - + sizeof(*h) , GFP_KERNEL); - if (!scsi_dh_data) { - sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", - RDAC_NAME); - return -ENOMEM; - } - - scsi_dh_data->scsi_dh = &rdac_dh; - h = (struct rdac_dh_data *) scsi_dh_data->buf; + h = kzalloc(sizeof(*h) , GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); h->lun = UNINITIALIZED_LUN; h->state = RDAC_STATE_ACTIVE; @@ -878,19 +856,12 @@ static int rdac_bus_attach(struct scsi_device *sdev) if (err != SCSI_DH_OK) goto clean_ctlr; - if (!try_module_get(THIS_MODULE)) - goto clean_ctlr; - - spin_lock_irqsave(sdev->request_queue->queue_lock, 
flags); - sdev->scsi_dh_data = scsi_dh_data; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); - sdev_printk(KERN_NOTICE, sdev, "%s: LUN %d (%s) (%s)\n", RDAC_NAME, h->lun, mode[(int)h->mode], lun_state[(int)h->lun_state]); - return 0; + return &h->dh_data; clean_ctlr: spin_lock(&list_lock); @@ -898,37 +869,34 @@ clean_ctlr: spin_unlock(&list_lock); failed: - kfree(scsi_dh_data); - sdev_printk(KERN_ERR, sdev, "%s: not attached\n", - RDAC_NAME); - return -EINVAL; + kfree(h); + return ERR_PTR(-EINVAL); } static void rdac_bus_detach( struct scsi_device *sdev ) { - struct scsi_dh_data *scsi_dh_data; - struct rdac_dh_data *h; - unsigned long flags; + struct rdac_dh_data *h = get_rdac_data(sdev); - scsi_dh_data = sdev->scsi_dh_data; - h = (struct rdac_dh_data *) scsi_dh_data->buf; if (h->ctlr && h->ctlr->ms_queued) flush_workqueue(kmpath_rdacd); - spin_lock_irqsave(sdev->request_queue->queue_lock, flags); - sdev->scsi_dh_data = NULL; - spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); - spin_lock(&list_lock); if (h->ctlr) kref_put(&h->ctlr->kref, release_controller); spin_unlock(&list_lock); - kfree(scsi_dh_data); - module_put(THIS_MODULE); - sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME); + kfree(h); } - +static struct scsi_device_handler rdac_dh = { + .name = RDAC_NAME, + .module = THIS_MODULE, + .prep_fn = rdac_prep_fn, + .check_sense = rdac_check_sense, + .attach = rdac_bus_attach, + .detach = rdac_bus_detach, + .activate = rdac_activate, + .match = rdac_match, +}; static int __init rdac_init(void) { diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c index 4b0dd8c56707..3e088125a8be 100644 --- a/drivers/scsi/dmx3191d.c +++ b/drivers/scsi/dmx3191d.c @@ -33,20 +33,20 @@ /* * Definitions for the generic 5380 driver. */ -#define AUTOSENSE + +#define DONT_USE_INTR #define NCR5380_read(reg) inb(port + reg) #define NCR5380_write(reg, value) outb(value, port + reg) -#define NCR5380_implementation_fields unsigned int port -#define NCR5380_local_declare() NCR5380_implementation_fields +#define NCR5380_implementation_fields /* none */ +#define NCR5380_local_declare() unsigned int port #define NCR5380_setup(instance) port = instance->io_port /* * Includes needed for NCR5380.[ch] (XXX: Move them to NCR5380.h) */ #include <linux/delay.h> -#include "scsi.h" #include "NCR5380.h" #include "NCR5380.c" @@ -58,6 +58,7 @@ static struct scsi_host_template dmx3191d_driver_template = { .proc_name = DMX3191D_DRIVER_NAME, .name = "Domex DMX3191D", + .info = NCR5380_info, .queuecommand = NCR5380_queue_command, .eh_abort_handler = NCR5380_abort, .eh_bus_reset_handler = NCR5380_bus_reset, @@ -90,31 +91,23 @@ static int dmx3191d_probe_one(struct pci_dev *pdev, if (!shost) goto out_release_region; shost->io_port = io; - shost->irq = pdev->irq; - NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E); + /* This card does not seem to raise an interrupt on pdev->irq. + * Steam-powered SCSI controllers run without an IRQ anyway. 
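
The device-handler conversions above (alua, clariion, hp_sw, rdac) all follow
the same shape: the handler embeds struct scsi_dh_data in its private data,
attach() allocates that structure and returns the embedded member (or an
ERR_PTR), and the scsi_dh core now takes the module reference, initializes the
kref and installs sdev->scsi_dh_data under the queue lock. A condensed,
hypothetical handler skeleton assuming the usual scsi_dh headers (the my_*
names are illustrative, not from the patch):

	struct my_dh_data {
		struct scsi_dh_data dh_data;	/* embedded; core stores a pointer to this */
		int my_state;			/* handler-private fields follow */
	};

	static struct scsi_dh_data *my_dh_attach(struct scsi_device *sdev)
	{
		struct my_dh_data *h = kzalloc(sizeof(*h), GFP_KERNEL);

		if (!h)
			return ERR_PTR(-ENOMEM);
		/* device-specific initialization goes here; on failure,
		 * kfree(h) and return an ERR_PTR() as the handlers above do.
		 */
		return &h->dh_data;	/* core fills in scsi_dh, kref and sdev */
	}

	static void my_dh_detach(struct scsi_device *sdev)
	{
		struct my_dh_data *h =
			container_of(sdev->scsi_dh_data, struct my_dh_data, dh_data);

		kfree(h);	/* core clears sdev->scsi_dh_data and drops the module ref */
	}

	static struct scsi_device_handler my_dh = {
		.name	= "my_dh",
		.module	= THIS_MODULE,
		.attach	= my_dh_attach,
		.detach	= my_dh_detach,
	};

Registration itself is unchanged (scsi_register_device_handler()), though the
core now rejects handlers that do not provide both attach and detach.
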
+ */ + shost->irq = NO_IRQ; - if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED, - DMX3191D_DRIVER_NAME, shost)) { - /* - * Steam powered scsi controllers run without an IRQ anyway - */ - printk(KERN_WARNING "dmx3191: IRQ %d not available - " - "switching to polled mode.\n", pdev->irq); - shost->irq = SCSI_IRQ_NONE; - } + NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E); pci_set_drvdata(pdev, shost); error = scsi_add_host(shost, &pdev->dev); if (error) - goto out_free_irq; + goto out_release_region; scsi_scan_host(shost); return 0; - out_free_irq: - free_irq(shost->irq, shost); out_release_region: release_region(io, DMX3191D_REGION_LEN); out_disable_device: @@ -131,8 +124,6 @@ static void dmx3191d_remove_one(struct pci_dev *pdev) NCR5380_exit(shost); - if (shost->irq != SCSI_IRQ_NONE) - free_irq(shost->irq, shost); release_region(shost->io_port, DMX3191D_REGION_LEN); pci_disable_device(pdev); diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 072f0ec2851e..0bf976936a10 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -415,10 +415,8 @@ static int adpt_slave_configure(struct scsi_device * device) pHba = (adpt_hba *) host->hostdata[0]; if (host->can_queue && device->tagged_supported) { - scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG, + scsi_change_queue_depth(device, host->can_queue - 1); - } else { - scsi_adjust_queue_depth(device, 0, 1); } return 0; } diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index 0a667fe05006..4c74c7ba2dff 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -1,5 +1,4 @@ -#define AUTOSENSE #define PSEUDO_DMA #define DONT_USE_INTR #define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ @@ -18,29 +17,9 @@ * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 440-4894 - * - * DISTRIBUTION RELEASE 1. - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook -*/ + */ /* - * Options : - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. - * - * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance - * increase compared to polled I/O. - * - * PARITY - enable parity checking. Not supported. - * - * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. - * You probably want this. - * * The card is detected and initialized in one of several ways : * 1. Autoprobe (default) - since the board is memory mapped, * a BIOS signature is scanned for to locate the registers. @@ -79,15 +58,11 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> -#include "scsi.h" #include <scsi/scsi_host.h> #include "dtc.h" #define AUTOPROBE_IRQ #include "NCR5380.h" - -#define DTC_PUBLIC_RELEASE 2 - /* * The DTC3180 & 3280 boards are memory mapped. 
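The dpt_i2o hunk above is one instance of a conversion repeated throughout this series (eata, gdth, fnic, esp): scsi_adjust_queue_depth(), which took a tag-message type as well as a depth, becomes scsi_change_queue_depth(), which takes only the depth, because tag-type bookkeeping now lives in the midlayer. What a converted slave_configure typically reduces to (the depth constant is illustrative):

static int my_slave_configure(struct scsi_device *sdev)
{
    /* No MSG_SIMPLE_TAG / MSG_ORDERED_TAG argument any more. */
    if (sdev->tagged_supported)
        scsi_change_queue_depth(sdev, MY_TAGGED_QUEUE_DEPTH);
    return 0;   /* untagged devices keep the cmd_per_lun default */
}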
* @@ -173,10 +148,13 @@ static const struct signature { * */ -static void __init dtc_setup(char *str, int *ints) +static int __init dtc_setup(char *str) { static int commandline_current = 0; int i; + int ints[10]; + + get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] != 2) printk("dtc_setup: usage dtc=address,irq\n"); else if (commandline_current < NO_OVERRIDES) { @@ -189,7 +167,10 @@ static void __init dtc_setup(char *str, int *ints) } ++commandline_current; } + return 1; } + +__setup("dtc=", dtc_setup); #endif /* @@ -213,10 +194,6 @@ static int __init dtc_detect(struct scsi_host_template * tpnt) void __iomem *base; int sig, count; - tpnt->proc_name = "dtc3x80"; - tpnt->show_info = dtc_show_info; - tpnt->write_info = dtc_write_info; - for (count = 0; current_override < NO_OVERRIDES; ++current_override) { addr = 0; base = NULL; @@ -271,38 +248,33 @@ found: else instance->irq = NCR5380_probe_irq(instance, DTC_IRQS); + /* Compatibility with documented NCR5380 kernel parameters */ + if (instance->irq == 255) + instance->irq = NO_IRQ; + #ifndef DONT_USE_INTR /* With interrupts enabled, it will sometimes hang when doing heavy * reads. So better not enable them until I finger it out. */ - if (instance->irq != SCSI_IRQ_NONE) + if (instance->irq != NO_IRQ) if (request_irq(instance->irq, dtc_intr, 0, "dtc", instance)) { printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; + instance->irq = NO_IRQ; } - if (instance->irq == SCSI_IRQ_NONE) { + if (instance->irq == NO_IRQ) { printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } #else - if (instance->irq != SCSI_IRQ_NONE) + if (instance->irq != NO_IRQ) printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no); - instance->irq = SCSI_IRQ_NONE; + instance->irq = NO_IRQ; #endif #if defined(DTCDEBUG) && (DTCDEBUG & DTCDEBUG_INIT) printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); #endif - printk(KERN_INFO "scsi%d : at 0x%05X", instance->host_no, (int) instance->base); - if (instance->irq == SCSI_IRQ_NONE) - printk(" interrupts disabled"); - else - printk(" irq %d", instance->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, DTC_PUBLIC_RELEASE); - NCR5380_print_options(instance); - printk("\n"); - ++current_override; ++count; } @@ -354,20 +326,18 @@ static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev, * timeout. 
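The dtc_setup() change above is the usual conversion from the old setup hook, which received a pre-parsed ints[] array, to a self-registering __setup() handler that parses its argument with get_options(). Schematically (driver name and variables are made up):

static int override_addr __initdata;
static int override_irq __initdata;

static int __init mycard_setup(char *str)
{
    int ints[3];    /* ints[0] receives the number of values parsed */

    get_options(str, ARRAY_SIZE(ints), ints);
    if (ints[0] == 2) {
        override_addr = ints[1];
        override_irq = ints[2];
    }
    return 1;       /* non-zero tells the boot code the option is handled */
}

__setup("mycard=", mycard_setup);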
*/ -static int dtc_maxi = 0; -static int dtc_wmaxi = 0; - static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len) { unsigned char *d = dst; int i; /* For counting time spent in the poll-loop */ + struct NCR5380_hostdata *hostdata = shost_priv(instance); NCR5380_local_declare(); NCR5380_setup(instance); i = 0; NCR5380_read(RESET_PARITY_INTERRUPT_REG); NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); - if (instance->irq == SCSI_IRQ_NONE) + if (instance->irq == NO_IRQ) NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ); else NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE); @@ -391,8 +361,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ rtrc(0); NCR5380_read(RESET_PARITY_INTERRUPT_REG); - if (i > dtc_maxi) - dtc_maxi = i; + if (i > hostdata->spin_max_r) + hostdata->spin_max_r = i; return (0); } @@ -412,13 +382,14 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len) { int i; + struct NCR5380_hostdata *hostdata = shost_priv(instance); NCR5380_local_declare(); NCR5380_setup(instance); NCR5380_read(RESET_PARITY_INTERRUPT_REG); NCR5380_write(MODE_REG, MR_ENABLE_EOP_INTR | MR_DMA_MODE); /* set direction (write) */ - if (instance->irq == SCSI_IRQ_NONE) + if (instance->irq == NO_IRQ) NCR5380_write(DTC_CONTROL_REG, 0); else NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); @@ -444,8 +415,8 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, /* Check for parity error here. fixme. */ NCR5380_write(MODE_REG, 0); /* Clear the operating mode */ rtrc(0); - if (i > dtc_wmaxi) - dtc_wmaxi = i; + if (i > hostdata->spin_max_w) + hostdata->spin_max_w = i; return (0); } @@ -457,7 +428,7 @@ static int dtc_release(struct Scsi_Host *shost) { NCR5380_local_declare(); NCR5380_setup(shost); - if (shost->irq) + if (shost->irq != NO_IRQ) free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) @@ -471,6 +442,10 @@ static struct scsi_host_template driver_template = { .name = "DTC 3180/3280 ", .detect = dtc_detect, .release = dtc_release, + .proc_name = "dtc3x80", + .show_info = dtc_show_info, + .write_info = dtc_write_info, + .info = dtc_info, .queuecommand = dtc_queue_command, .eh_abort_handler = dtc_abort, .eh_bus_reset_handler = dtc_bus_reset, diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index 92d7cfc3f4fc..78a2332e9064 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h @@ -5,24 +5,6 @@ * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 440-4894 - * - * DISTRIBUTION RELEASE 2. 
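The NCR5380_pread()/NCR5380_pwrite() hunks above replace the file-scope dtc_maxi/dtc_wmaxi statistics with the spin_max_r/spin_max_w fields this series keeps in NCR5380_hostdata, reached through shost_priv(), so two boards no longer update one shared pair of counters. The access pattern, with the actual polling elided:

static inline int my_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
{
    struct NCR5380_hostdata *hostdata = shost_priv(instance);
    int spins = 0;

    /* ... poll the 5380 for 'len' bytes into 'dst', counting iterations ... */

    if (spins > hostdata->spin_max_r)   /* per-host worst case */
        hostdata->spin_max_r = spins;
    return 0;
}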
- * - * For more information, please consult - * - * - * - * and - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ #ifndef DTC3280_H @@ -32,13 +14,6 @@ #define DTCDEBUG_INIT 0x1 #define DTCDEBUG_TRANSFER 0x2 -static int dtc_abort(Scsi_Cmnd *); -static int dtc_biosparam(struct scsi_device *, struct block_device *, - sector_t, int*); -static int dtc_detect(struct scsi_host_template *); -static int dtc_queue_command(struct Scsi_Host *, struct scsi_cmnd *); -static int dtc_bus_reset(Scsi_Cmnd *); - #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 #endif @@ -88,6 +63,7 @@ static int dtc_bus_reset(Scsi_Cmnd *); #define NCR5380_queue_command dtc_queue_command #define NCR5380_abort dtc_abort #define NCR5380_bus_reset dtc_bus_reset +#define NCR5380_info dtc_info #define NCR5380_show_info dtc_show_info #define NCR5380_write_info dtc_write_info diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index 943ad3a19661..227dd2c2ec2f 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -946,20 +946,18 @@ static int eata2x_slave_configure(struct scsi_device *dev) if (TLDEV(dev->type) && dev->tagged_supported) { if (tag_mode == TAG_SIMPLE) { - scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd); tag_suffix = ", simple tags"; } else if (tag_mode == TAG_ORDERED) { - scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd); tag_suffix = ", ordered tags"; } else { - scsi_adjust_queue_depth(dev, 0, tqd); tag_suffix = ", no tags"; } + scsi_change_queue_depth(dev, tqd); } else if (TLDEV(dev->type) && linked_comm) { - scsi_adjust_queue_depth(dev, 0, tqd); + scsi_change_queue_depth(dev, tqd); tag_suffix = ", untagged"; } else { - scsi_adjust_queue_depth(dev, 0, utqd); + scsi_change_queue_depth(dev, utqd); tag_suffix = ""; } diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index 3fd305d6b67d..b6030e3edd01 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -972,11 +972,6 @@ u8 handle_hba_ioctl(struct esas2r_adapter *a, struct atto_ioctl *ioctl_hba); int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh); -int esas2r_slave_alloc(struct scsi_device *dev); -int esas2r_slave_configure(struct scsi_device *dev); -void esas2r_slave_destroy(struct scsi_device *dev); -int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason); -int esas2r_change_queue_type(struct scsi_device *dev, int type); long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); /* SCSI error handler (eh) functions */ diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index d89a0277a8e1..baf913047b48 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -117,9 +117,8 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi) rq = esas2r_alloc_request(a); if (rq == NULL) { - up(&a->fm_api_semaphore); fi->status = FI_STAT_BUSY; - return; + goto free_sem; } if (fi == &a->firmware.header) { @@ -135,7 +134,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi) if (a->firmware.header_buff == NULL) { esas2r_debug("failed to allocate header buffer!"); fi->status = FI_STAT_BUSY; - return; + goto free_req; } memcpy(a->firmware.header_buff, fi, @@ -171,9 +170,10 @@ all_done: a->firmware.header_buff, 
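The do_fm_api() rework starting above (and finishing in the next chunk) is a straightforward unwind fix: the early returns were leaking the fm_api semaphore and, later, the allocated request, so the error exits are routed through labels that release resources in the reverse order they were taken. The general shape, with hypothetical names:

static int do_thing(struct my_adapter *a)
{
    struct my_request *rq;
    int ret = 0;

    down(&a->sem);          /* taken first ... */

    rq = my_alloc_request(a);
    if (!rq) {
        ret = -EBUSY;
        goto free_sem;      /* nothing else to undo yet */
    }

    if (my_prepare(a, rq)) {
        ret = -EIO;
        goto free_req;
    }

    my_do_work(a, rq);

free_req:
    my_free_request(a, rq);
free_sem:
    up(&a->sem);            /* ... released last */
    return ret;
}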
(dma_addr_t)a->firmware.header_buff_phys); } - - up(&a->fm_api_semaphore); +free_req: esas2r_free_request(a, (struct esas2r_request *)rq); +free_sem: + up(&a->fm_api_semaphore); return; } @@ -1420,9 +1420,10 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) rq = esas2r_alloc_request(a); if (rq == NULL) { - up(&a->nvram_semaphore); - ioctl->data.prw.code = 0; - break; + kfree(ioctl); + esas2r_log(ESAS2R_LOG_WARN, + "could not allocate an internal request"); + return -ENOMEM; } code = esas2r_write_params(a, rq, @@ -1523,9 +1524,12 @@ ioctl_done: case -EINVAL: ioctl->header.return_code = IOCTL_INVALID_PARAM; break; + + default: + ioctl->header.return_code = IOCTL_GENERAL_ERROR; + break; } - ioctl->header.return_code = IOCTL_GENERAL_ERROR; } /* Always copy the buffer back, if only to pick up the status */ diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 6504a195c874..593ff8a63c70 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -254,12 +254,10 @@ static struct scsi_host_template driver_template = { .use_clustering = ENABLE_CLUSTERING, .emulated = 0, .proc_name = ESAS2R_DRVR_NAME, - .slave_configure = esas2r_slave_configure, - .slave_alloc = esas2r_slave_alloc, - .slave_destroy = esas2r_slave_destroy, - .change_queue_depth = esas2r_change_queue_depth, - .change_queue_type = esas2r_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, .max_sectors = 0xFFFF, + .use_blk_tags = 1, }; int sgl_page_size = 512; @@ -1057,7 +1055,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd) cmd->scsi_done(cmd); - return 0; + return SUCCESS; } spin_lock_irqsave(&a->queue_lock, flags); @@ -1259,60 +1257,6 @@ int esas2r_target_reset(struct scsi_cmnd *cmd) return esas2r_dev_targ_reset(cmd, true); } -int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason) -{ - esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth); - - scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth); - - return dev->queue_depth; -} - -int esas2r_change_queue_type(struct scsi_device *dev, int type) -{ - esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type); - - if (dev->tagged_supported) { - scsi_set_tag_type(dev, type); - - if (type) - scsi_activate_tcq(dev, dev->queue_depth); - else - scsi_deactivate_tcq(dev, dev->queue_depth); - } else { - type = 0; - } - - return type; -} - -int esas2r_slave_alloc(struct scsi_device *dev) -{ - return 0; -} - -int esas2r_slave_configure(struct scsi_device *dev) -{ - esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev), - "esas2r_slave_configure()"); - - if (dev->tagged_supported) { - scsi_set_tag_type(dev, MSG_SIMPLE_TAG); - scsi_activate_tcq(dev, cmd_per_lun); - } else { - scsi_set_tag_type(dev, 0); - scsi_deactivate_tcq(dev, cmd_per_lun); - } - - return 0; -} - -void esas2r_slave_destroy(struct scsi_device *dev) -{ - esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev), - "esas2r_slave_destroy()"); -} - void esas2r_log_request_failure(struct esas2r_adapter *a, struct esas2r_request *rq) { diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 55548dc5cec3..ce5bd52fe692 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -49,55 +49,67 @@ static u32 esp_debug; #define ESP_DEBUG_DATADONE 0x00000100 #define ESP_DEBUG_RECONNECT 0x00000200 #define ESP_DEBUG_AUTOSENSE 0x00000400 +#define ESP_DEBUG_EVENT 0x00000800 +#define ESP_DEBUG_COMMAND 0x00001000 #define esp_log_intr(f, 
a...) \ do { if (esp_debug & ESP_DEBUG_INTR) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_reset(f, a...) \ do { if (esp_debug & ESP_DEBUG_RESET) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_msgin(f, a...) \ do { if (esp_debug & ESP_DEBUG_MSGIN) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_msgout(f, a...) \ do { if (esp_debug & ESP_DEBUG_MSGOUT) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_cmddone(f, a...) \ do { if (esp_debug & ESP_DEBUG_CMDDONE) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_disconnect(f, a...) \ do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_datastart(f, a...) \ do { if (esp_debug & ESP_DEBUG_DATASTART) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_datadone(f, a...) \ do { if (esp_debug & ESP_DEBUG_DATADONE) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_reconnect(f, a...) \ do { if (esp_debug & ESP_DEBUG_RECONNECT) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_log_autosense(f, a...) \ do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ - printk(f, ## a); \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_event(f, a...) \ +do { if (esp_debug & ESP_DEBUG_EVENT) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ +} while (0) + +#define esp_log_command(f, a...) \ +do { if (esp_debug & ESP_DEBUG_COMMAND) \ + shost_printk(KERN_DEBUG, esp->host, f, ## a); \ } while (0) #define esp_read8(REG) esp->ops->esp_read8(esp, REG) @@ -126,10 +138,29 @@ void scsi_esp_cmd(struct esp *esp, u8 val) esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); + esp_log_command("cmd[%02x]\n", val); esp_write8(val, ESP_CMD); } EXPORT_SYMBOL(scsi_esp_cmd); +static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd) +{ + if (esp->flags & ESP_FLAG_USE_FIFO) { + int i; + + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + for (i = 0; i < len; i++) + esp_write8(esp->command_block[i], ESP_FDATA); + scsi_esp_cmd(esp, cmd); + } else { + if (esp->rev == FASHME) + scsi_esp_cmd(esp, ESP_CMD_FLUSH); + cmd |= ESP_CMD_DMA; + esp->ops->send_dma_cmd(esp, esp->command_block_dma, + len, max_len, 0, cmd); + } +} + static void esp_event(struct esp *esp, u8 val) { struct esp_event_ent *p; @@ -150,19 +181,17 @@ static void esp_dump_cmd_log(struct esp *esp) int idx = esp->esp_event_cur; int stop = idx; - printk(KERN_INFO PFX "esp%d: Dumping command log\n", - esp->host->unique_id); + shost_printk(KERN_INFO, esp->host, "Dumping command log\n"); do { struct esp_event_ent *p = &esp->esp_event_log[idx]; - printk(KERN_INFO PFX "esp%d: ent[%d] %s ", - esp->host->unique_id, idx, - p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT"); - - printk("val[%02x] sreg[%02x] seqreg[%02x] " - "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", - p->val, p->sreg, p->seqreg, - p->sreg2, p->ireg, p->select_state, p->event); + shost_printk(KERN_INFO, esp->host, + "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] " + "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", + idx, + p->type == ESP_EVENT_TYPE_CMD ? 
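All of the esp_log_* macros above keep their bitmask gating but switch from bare printk() to shost_printk(), so every debug line is automatically tagged with the Scsi_Host it came from instead of a hand-built "esp%d:" prefix. Reduced to a single macro, the idiom looks like this (like the driver's macros, it relies on a local variable named esp being in scope at every call site):

#define MY_DEBUG_INTR 0x00000001

static u32 my_debug;    /* typically wired up as a module parameter */

#define my_log_intr(fmt, a...)                                  \
do {                                                            \
    if (my_debug & MY_DEBUG_INTR)                               \
        shost_printk(KERN_DEBUG, esp->host, fmt, ## a);         \
} while (0)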
"CMD" : "EVENT", + p->val, p->sreg, p->seqreg, + p->sreg2, p->ireg, p->select_state, p->event); idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); } while (idx != stop); @@ -176,9 +205,8 @@ static void esp_flush_fifo(struct esp *esp) while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { if (--lim == 0) { - printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES " - "will not clear!\n", - esp->host->unique_id); + shost_printk(KERN_ALERT, esp->host, + "ESP_FF_BYTES will not clear!\n"); break; } udelay(1); @@ -240,6 +268,19 @@ static void esp_reset_esp(struct esp *esp) } else { esp->min_period = ((5 * esp->ccycle) / 1000); } + if (esp->rev == FAS236) { + /* + * The AM53c974 chip returns the same ID as FAS236; + * try to configure glitch eater. + */ + u8 config4 = ESP_CONFIG4_GE1; + esp_write8(config4, ESP_CFG4); + config4 = esp_read8(ESP_CFG4); + if (config4 & ESP_CONFIG4_GE1) { + esp->rev = PCSCSI; + esp_write8(esp->config4, ESP_CFG4); + } + } esp->max_period = (esp->max_period + 3)>>2; esp->min_period = (esp->min_period + 3)>>2; @@ -265,7 +306,8 @@ static void esp_reset_esp(struct esp *esp) /* fallthrough... */ case FAS236: - /* Fast 236 or HME */ + case PCSCSI: + /* Fast 236, AM53c974 or HME */ esp_write8(esp->config2, ESP_CFG2); if (esp->rev == FASHME) { u8 cfg3 = esp->target[0].esp_config3; @@ -383,12 +425,11 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, p->cur_residue -= len; p->tot_residue -= len; if (p->cur_residue < 0 || p->tot_residue < 0) { - printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n", - esp->host->unique_id); - printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] " - "len[%u]\n", - esp->host->unique_id, - p->cur_residue, p->tot_residue, len); + shost_printk(KERN_ERR, esp->host, + "Data transfer overflow.\n"); + shost_printk(KERN_ERR, esp->host, + "cur_residue[%d] tot_residue[%d] len[%u]\n", + p->cur_residue, p->tot_residue, len); p->cur_residue = 0; p->tot_residue = 0; } @@ -604,9 +645,8 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) if (!ent->sense_ptr) { - esp_log_autosense("esp%d: Doing auto-sense for " - "tgt[%d] lun[%d]\n", - esp->host->unique_id, tgt, lun); + esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n", + tgt, lun); ent->sense_ptr = cmd->sense_buffer; ent->sense_dma = esp->ops->map_single(esp, @@ -642,10 +682,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) val = (p - esp->command_block); - if (esp->rev == FASHME) - scsi_esp_cmd(esp, ESP_CMD_FLUSH); - esp->ops->send_dma_cmd(esp, esp->command_block_dma, - val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA); + esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA); } static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) @@ -663,7 +700,7 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) return ent; } - if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) { + if (!spi_populate_tag_msg(&ent->tag[0], cmd)) { ent->tag[0] = 0; ent->tag[1] = 0; } @@ -781,12 +818,12 @@ build_identify: } if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { - start_cmd = ESP_CMD_DMA | ESP_CMD_SELA; + start_cmd = ESP_CMD_SELA; if (ent->tag[0]) { *p++ = ent->tag[0]; *p++ = ent->tag[1]; - start_cmd = ESP_CMD_DMA | ESP_CMD_SA3; + start_cmd = ESP_CMD_SA3; } for (i = 0; i < cmd->cmd_len; i++) @@ -806,7 +843,7 @@ build_identify: esp->msg_out_len += 2; } - start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS; + start_cmd = ESP_CMD_SELAS; esp->select_state = ESP_SELECT_MSGOUT; } val = tgt; @@ -826,10 +863,7 @@ build_identify: printk("]\n"); } - if (esp->rev == FASHME) - 
scsi_esp_cmd(esp, ESP_CMD_FLUSH); - esp->ops->send_dma_cmd(esp, esp->command_block_dma, - val, 16, 0, start_cmd); + esp_send_dma_cmd(esp, val, 16, start_cmd); } static struct esp_cmd_entry *esp_get_ent(struct esp *esp) @@ -953,8 +987,8 @@ static int esp_check_gross_error(struct esp *esp) * - DMA programmed with wrong direction * - improper phase change */ - printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n", - esp->host->unique_id, esp->sreg); + shost_printk(KERN_ERR, esp->host, + "Gross error sreg[%02x]\n", esp->sreg); /* XXX Reset the chip. XXX */ return 1; } @@ -974,7 +1008,6 @@ static int esp_check_spur_intr(struct esp *esp) default: if (!(esp->sreg & ESP_STAT_INTR)) { - esp->ireg = esp_read8(ESP_INTRPT); if (esp->ireg & ESP_INTR_SR) return 1; @@ -982,14 +1015,13 @@ static int esp_check_spur_intr(struct esp *esp) * ESP is not, the only possibility is a DMA error. */ if (!esp->ops->dma_error(esp)) { - printk(KERN_ERR PFX "esp%d: Spurious irq, " - "sreg=%02x.\n", - esp->host->unique_id, esp->sreg); + shost_printk(KERN_ERR, esp->host, + "Spurious irq, sreg=%02x.\n", + esp->sreg); return -1; } - printk(KERN_ERR PFX "esp%d: DMA error\n", - esp->host->unique_id); + shost_printk(KERN_ERR, esp->host, "DMA error\n"); /* XXX Reset the chip. XXX */ return -1; @@ -1002,7 +1034,7 @@ static int esp_check_spur_intr(struct esp *esp) static void esp_schedule_reset(struct esp *esp) { - esp_log_reset("ESP: esp_schedule_reset() from %pf\n", + esp_log_reset("esp_schedule_reset() from %pf\n", __builtin_return_address(0)); esp->flags |= ESP_FLAG_RESETTING; esp_event(esp, ESP_EVENT_RESET); @@ -1019,20 +1051,20 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, int i; if (!lp->num_tagged) { - printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n", - esp->host->unique_id); + shost_printk(KERN_ERR, esp->host, + "Reconnect w/num_tagged==0\n"); return NULL; } - esp_log_reconnect("ESP: reconnect tag, "); + esp_log_reconnect("reconnect tag, "); for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { if (esp->ops->irq_pending(esp)) break; } if (i == ESP_QUICKIRQ_LIMIT) { - printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n", - esp->host->unique_id); + shost_printk(KERN_ERR, esp->host, + "Reconnect IRQ1 timeout\n"); return NULL; } @@ -1043,14 +1075,14 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, i, esp->ireg, esp->sreg); if (esp->ireg & ESP_INTR_DC) { - printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n", - esp->host->unique_id); + shost_printk(KERN_ERR, esp->host, + "Reconnect, got disconnect.\n"); return NULL; } if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { - printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n", - esp->host->unique_id, esp->sreg); + shost_printk(KERN_ERR, esp->host, + "Reconnect, not MIP sreg[%02x].\n", esp->sreg); return NULL; } @@ -1073,8 +1105,7 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, udelay(1); } if (i == ESP_RESELECT_TAG_LIMIT) { - printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n", - esp->host->unique_id); + shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n"); return NULL; } esp->ops->dma_drain(esp); @@ -1087,17 +1118,17 @@ static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, if (esp->command_block[0] < SIMPLE_QUEUE_TAG || esp->command_block[0] > ORDERED_QUEUE_TAG) { - printk(KERN_ERR PFX "esp%d: Reconnect, bad tag " - "type %02x.\n", - esp->host->unique_id, esp->command_block[0]); + shost_printk(KERN_ERR, esp->host, + "Reconnect, bad tag type %02x.\n", + 
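The esp_reset_esp() hunk a little above copes with the fact that the AM53c974 answers the usual probes exactly like a FAS236; the one register only the AM53c974 implements is CFG4, so the driver writes a glitch-eater bit there and checks whether it reads back. A commented sketch of that probe (register and bit names as added by this series):

    if (esp->rev == FAS236) {
        /* Try to set a glitch-eater bit that only CFG4 has. */
        esp_write8(ESP_CONFIG4_GE1, ESP_CFG4);

        if (esp_read8(ESP_CFG4) & ESP_CONFIG4_GE1) {
            /* The bit stuck, so CFG4 exists: this is an AM53c974. */
            esp->rev = PCSCSI;
            esp_write8(esp->config4, ESP_CFG4);  /* restore the wanted value */
        }
        /* Otherwise the write was ignored and it really is a FAS236. */
    }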
esp->command_block[0]); return NULL; } ent = lp->tagged_cmds[esp->command_block[1]]; if (!ent) { - printk(KERN_ERR PFX "esp%d: Reconnect, no entry for " - "tag %02x.\n", - esp->host->unique_id, esp->command_block[1]); + shost_printk(KERN_ERR, esp->host, + "Reconnect, no entry for tag %02x.\n", + esp->command_block[1]); return NULL; } @@ -1163,9 +1194,9 @@ static int esp_reconnect(struct esp *esp) tp = &esp->target[target]; dev = __scsi_device_lookup_by_target(tp->starget, lun); if (!dev) { - printk(KERN_ERR PFX "esp%d: Reconnect, no lp " - "tgt[%u] lun[%u]\n", - esp->host->unique_id, target, lun); + shost_printk(KERN_ERR, esp->host, + "Reconnect, no lp tgt[%u] lun[%u]\n", + target, lun); goto do_reset; } lp = dev->hostdata; @@ -1291,8 +1322,8 @@ static int esp_finish_select(struct esp *esp) return 0; } - printk("ESP: Unexpected selection completion ireg[%x].\n", - esp->ireg); + shost_printk(KERN_INFO, esp->host, + "Unexpected selection completion ireg[%x]\n", esp->ireg); esp_schedule_reset(esp); return 0; } @@ -1312,11 +1343,42 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, (((unsigned int)esp_read8(ESP_TCMED)) << 8)); if (esp->rev == FASHME) ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; + if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB)) + ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16; } bytes_sent = esp->data_dma_len; bytes_sent -= ecount; + /* + * The am53c974 has a DMA 'pecularity'. The doc states: + * In some odd byte conditions, one residual byte will + * be left in the SCSI FIFO, and the FIFO Flags will + * never count to '0 '. When this happens, the residual + * byte should be retrieved via PIO following completion + * of the BLAST operation. + */ + if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) { + size_t count = 1; + size_t offset = bytes_sent; + u8 bval = esp_read8(ESP_FDATA); + + if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) + ent->sense_ptr[bytes_sent] = bval; + else { + struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); + u8 *ptr; + + ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg, + &offset, &count); + if (likely(ptr)) { + *(ptr + offset) = bval; + scsi_kunmap_atomic_sg(ptr); + } + } + bytes_sent += fifo_cnt; + ent->flags &= ~ESP_CMD_FLAG_RESIDUAL; + } if (!(ent->flags & ESP_CMD_FLAG_WRITE)) bytes_sent -= fifo_cnt; @@ -1556,8 +1618,8 @@ static void esp_msgin_extended(struct esp *esp) return; } - printk("ESP: Unexpected extended msg type %x\n", - esp->msg_in[2]); + shost_printk(KERN_INFO, esp->host, + "Unexpected extended msg type %x\n", esp->msg_in[2]); esp->msg_out[0] = ABORT_TASK_SET; esp->msg_out_len = 1; @@ -1574,7 +1636,8 @@ static int esp_msgin_process(struct esp *esp) if (msg0 & 0x80) { /* Identify */ - printk("ESP: Unexpected msgin identify\n"); + shost_printk(KERN_INFO, esp->host, + "Unexpected msgin identify\n"); return 0; } @@ -1640,10 +1703,12 @@ static int esp_msgin_process(struct esp *esp) static int esp_process_event(struct esp *esp) { - int write; + int write, i; again: write = 0; + esp_log_event("process event %d phase %x\n", + esp->event, esp->sreg & ESP_STAT_PMASK); switch (esp->event) { case ESP_EVENT_CHECK_PHASE: switch (esp->sreg & ESP_STAT_PMASK) { @@ -1673,8 +1738,9 @@ again: break; default: - printk("ESP: Unexpected phase, sreg=%02x\n", - esp->sreg); + shost_printk(KERN_INFO, esp->host, + "Unexpected phase, sreg=%02x\n", + esp->sreg); esp_schedule_reset(esp); return 0; } @@ -1708,18 +1774,17 @@ again: esp->data_dma_len = dma_len; if (!dma_len) { - printk(KERN_ERR PFX "esp%d: DMA length is 
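The esp_data_bytes_sent() hunk above handles an AM53c974 quirk: on certain odd-byte transfers one residual byte stays in the FIFO after BLAST and has to be read out by PIO and patched into the data buffer at the right place. The non-obvious helper is scsi_kmap_atomic_sg(), which maps whichever scatterlist page contains a given byte offset and rewrites the offset to be relative to that mapping. A sketch of the patch-up step (the driver itself walks its private cur_sg bookkeeping rather than scsi_sglist()):

static void patch_residual_byte(struct esp *esp, struct scsi_cmnd *cmd,
                                unsigned int bytes_sent)
{
    size_t count = 1;
    size_t offset = bytes_sent;     /* absolute offset into the transfer */
    u8 bval = esp_read8(ESP_FDATA); /* the byte left behind after BLAST */
    u8 *ptr;

    ptr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
                              &offset, &count);
    if (ptr) {
        *(ptr + offset) = bval;     /* offset is now mapping-relative */
        scsi_kunmap_atomic_sg(ptr);
    }
}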
zero!\n", - esp->host->unique_id); - printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n", - esp->host->unique_id, - (unsigned long long)esp_cur_dma_addr(ent, cmd), - esp_cur_dma_len(ent, cmd)); + shost_printk(KERN_ERR, esp->host, + "DMA length is zero!\n"); + shost_printk(KERN_ERR, esp->host, + "cur adr[%08llx] len[%08x]\n", + (unsigned long long)esp_cur_dma_addr(ent, cmd), + esp_cur_dma_len(ent, cmd)); esp_schedule_reset(esp); return 0; } - esp_log_datastart("ESP: start data addr[%08llx] len[%u] " - "write(%d)\n", + esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n", (unsigned long long)dma_addr, dma_len, write); esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, @@ -1733,7 +1798,8 @@ again: int bytes_sent; if (esp->ops->dma_error(esp)) { - printk("ESP: data done, DMA error, resetting\n"); + shost_printk(KERN_INFO, esp->host, + "data done, DMA error, resetting\n"); esp_schedule_reset(esp); return 0; } @@ -1749,14 +1815,15 @@ again: /* We should always see exactly a bus-service * interrupt at the end of a successful transfer. */ - printk("ESP: data done, not BSERV, resetting\n"); + shost_printk(KERN_INFO, esp->host, + "data done, not BSERV, resetting\n"); esp_schedule_reset(esp); return 0; } bytes_sent = esp_data_bytes_sent(esp, ent, cmd); - esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n", + esp_log_datadone("data done flgs[%x] sent[%d]\n", ent->flags, bytes_sent); if (bytes_sent < 0) { @@ -1785,8 +1852,9 @@ again: } if (ent->message != COMMAND_COMPLETE) { - printk("ESP: Unexpected message %x in status\n", - ent->message); + shost_printk(KERN_INFO, esp->host, + "Unexpected message %x in status\n", + ent->message); esp_schedule_reset(esp); return 0; } @@ -1804,8 +1872,7 @@ again: scsi_esp_cmd(esp, ESP_CMD_ESEL); if (ent->message == COMMAND_COMPLETE) { - esp_log_cmddone("ESP: Command done status[%x] " - "message[%x]\n", + esp_log_cmddone("Command done status[%x] message[%x]\n", ent->status, ent->message); if (ent->status == SAM_STAT_TASK_SET_FULL) esp_event_queue_full(esp, ent); @@ -1821,16 +1888,16 @@ again: DID_OK)); } } else if (ent->message == DISCONNECT) { - esp_log_disconnect("ESP: Disconnecting tgt[%d] " - "tag[%x:%x]\n", + esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n", cmd->device->id, ent->tag[0], ent->tag[1]); esp->active_cmd = NULL; esp_maybe_execute_command(esp); } else { - printk("ESP: Unexpected message %x in freebus\n", - ent->message); + shost_printk(KERN_INFO, esp->host, + "Unexpected message %x in freebus\n", + ent->message); esp_schedule_reset(esp); return 0; } @@ -1862,6 +1929,10 @@ again: if (esp->msg_out_len == 1) { esp_write8(esp->msg_out[0], ESP_FDATA); scsi_esp_cmd(esp, ESP_CMD_TI); + } else if (esp->flags & ESP_FLAG_USE_FIFO) { + for (i = 0; i < esp->msg_out_len; i++) + esp_write8(esp->msg_out[i], ESP_FDATA); + scsi_esp_cmd(esp, ESP_CMD_TI); } else { /* Use DMA. 
*/ memcpy(esp->command_block, @@ -1917,7 +1988,7 @@ again: val = esp_read8(ESP_FDATA); esp->msg_in[esp->msg_in_len++] = val; - esp_log_msgin("ESP: Got msgin byte %x\n", val); + esp_log_msgin("Got msgin byte %x\n", val); if (!esp_msgin_process(esp)) esp->msg_in_len = 0; @@ -1930,7 +2001,8 @@ again: if (esp->event != ESP_EVENT_FREE_BUS) esp_event(esp, ESP_EVENT_CHECK_PHASE); } else { - printk("ESP: MSGIN neither BSERV not FDON, resetting"); + shost_printk(KERN_INFO, esp->host, + "MSGIN neither BSERV not FDON, resetting"); esp_schedule_reset(esp); return 0; } @@ -1938,11 +2010,7 @@ again: case ESP_EVENT_CMD_START: memcpy(esp->command_block, esp->cmd_bytes_ptr, esp->cmd_bytes_left); - if (esp->rev == FASHME) - scsi_esp_cmd(esp, ESP_CMD_FLUSH); - esp->ops->send_dma_cmd(esp, esp->command_block_dma, - esp->cmd_bytes_left, 16, 0, - ESP_CMD_DMA | ESP_CMD_TI); + esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI); esp_event(esp, ESP_EVENT_CMD_DONE); esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; break; @@ -1961,8 +2029,8 @@ again: break; default: - printk("ESP: Unexpected event %x, resetting\n", - esp->event); + shost_printk(KERN_INFO, esp->host, + "Unexpected event %x, resetting\n", esp->event); esp_schedule_reset(esp); return 0; break; @@ -2044,7 +2112,12 @@ static void __esp_interrupt(struct esp *esp) int finish_reset, intr_done; u8 phase; + /* + * Once INTRPT is read STATUS and SSTEP are cleared. + */ esp->sreg = esp_read8(ESP_STATUS); + esp->seqreg = esp_read8(ESP_SSTEP); + esp->ireg = esp_read8(ESP_INTRPT); if (esp->flags & ESP_FLAG_RESETTING) { finish_reset = 1; @@ -2057,8 +2130,6 @@ static void __esp_interrupt(struct esp *esp) return; } - esp->ireg = esp_read8(ESP_INTRPT); - if (esp->ireg & ESP_INTR_SR) finish_reset = 1; @@ -2085,14 +2156,15 @@ static void __esp_interrupt(struct esp *esp) } } - esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] " + esp_log_intr("intr sreg[%02x] seqreg[%02x] " "sreg2[%02x] ireg[%02x]\n", esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); intr_done = 0; if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { - printk("ESP: unexpected IREG %02x\n", esp->ireg); + shost_printk(KERN_INFO, esp->host, + "unexpected IREG %02x\n", esp->ireg); if (esp->ireg & ESP_INTR_IC) esp_dump_cmd_log(esp); @@ -2149,46 +2221,50 @@ static void esp_get_revision(struct esp *esp) u8 val; esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); - esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); + if (esp->config2 == 0) { + esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); + esp_write8(esp->config2, ESP_CFG2); + + val = esp_read8(ESP_CFG2); + val &= ~ESP_CONFIG2_MAGIC; + + esp->config2 = 0; + if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { + /* + * If what we write to cfg2 does not come back, + * cfg2 is not implemented. + * Therefore this must be a plain esp100. + */ + esp->rev = ESP100; + return; + } + } + + esp_set_all_config3(esp, 5); + esp->prev_cfg3 = 5; esp_write8(esp->config2, ESP_CFG2); + esp_write8(0, ESP_CFG3); + esp_write8(esp->prev_cfg3, ESP_CFG3); - val = esp_read8(ESP_CFG2); - val &= ~ESP_CONFIG2_MAGIC; - if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { - /* If what we write to cfg2 does not come back, cfg2 is not - * implemented, therefore this must be a plain esp100. + val = esp_read8(ESP_CFG3); + if (val != 5) { + /* The cfg2 register is implemented, however + * cfg3 is not, must be esp100a. 
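The __esp_interrupt() hunk above exists because reading ESP_INTRPT clears the status and sequence-step registers, so everything the handler will want to look at has to be latched first; that is presumably also why the extra ESP_INTRPT read in esp_check_spur_intr() can go. The required order, in three lines:

    esp->sreg = esp_read8(ESP_STATUS);      /* latch before INTRPT ... */
    esp->seqreg = esp_read8(ESP_SSTEP);
    esp->ireg = esp_read8(ESP_INTRPT);      /* ... because this read clears them */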
*/ - esp->rev = ESP100; + esp->rev = ESP100A; } else { - esp->config2 = 0; - esp_set_all_config3(esp, 5); - esp->prev_cfg3 = 5; - esp_write8(esp->config2, ESP_CFG2); - esp_write8(0, ESP_CFG3); + esp_set_all_config3(esp, 0); + esp->prev_cfg3 = 0; esp_write8(esp->prev_cfg3, ESP_CFG3); - val = esp_read8(ESP_CFG3); - if (val != 5) { - /* The cfg2 register is implemented, however - * cfg3 is not, must be esp100a. - */ - esp->rev = ESP100A; + /* All of cfg{1,2,3} implemented, must be one of + * the fas variants, figure out which one. + */ + if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { + esp->rev = FAST; + esp->sync_defp = SYNC_DEFP_FAST; } else { - esp_set_all_config3(esp, 0); - esp->prev_cfg3 = 0; - esp_write8(esp->prev_cfg3, ESP_CFG3); - - /* All of cfg{1,2,3} implemented, must be one of - * the fas variants, figure out which one. - */ - if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { - esp->rev = FAST; - esp->sync_defp = SYNC_DEFP_FAST; - } else { - esp->rev = ESP236; - } - esp->config2 = 0; - esp_write8(esp->config2, ESP_CFG2); + esp->rev = ESP236; } } } @@ -2308,6 +2384,7 @@ static const char *esp_chip_names[] = { "FAS100A", "FAST", "FASHME", + "AM53C974", }; static struct scsi_transport_template *esp_transport_template; @@ -2317,6 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev) static int instance; int err; + if (!esp->num_tags) + esp->num_tags = ESP_DEFAULT_TAGS; + else if (esp->num_tags >= ESP_MAX_TAG) + esp->num_tags = ESP_MAX_TAG - 1; esp->host->transportt = esp_transport_template; esp->host->max_lun = ESP_MAX_LUN; esp->host->cmd_per_lun = 2; @@ -2330,12 +2411,13 @@ int scsi_esp_register(struct esp *esp, struct device *dev) esp_bootup_reset(esp); - printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n", - esp->host->unique_id, esp->regs, esp->dma_regs, - esp->host->irq); - printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n", - esp->host->unique_id, esp_chip_names[esp->rev], - esp->cfreq / 1000000, esp->cfact, esp->scsi_id); + dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n", + esp->host->unique_id, esp->regs, esp->dma_regs, + esp->host->irq); + dev_printk(KERN_INFO, dev, + "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n", + esp->host->unique_id, esp_chip_names[esp->rev], + esp->cfreq / 1000000, esp->cfact, esp->scsi_id); /* Let the SCSI bus reset settle. */ ssleep(esp_bus_reset_settle); @@ -2402,28 +2484,10 @@ static int esp_slave_configure(struct scsi_device *dev) { struct esp *esp = shost_priv(dev->host); struct esp_target_data *tp = &esp->target[dev->id]; - int goal_tags, queue_depth; - - goal_tags = 0; - if (dev->tagged_supported) { - /* XXX make this configurable somehow XXX */ - goal_tags = ESP_DEFAULT_TAGS; - - if (goal_tags > ESP_MAX_TAG) - goal_tags = ESP_MAX_TAG; - } + if (dev->tagged_supported) + scsi_change_queue_depth(dev, esp->num_tags); - queue_depth = goal_tags; - if (queue_depth < dev->host->cmd_per_lun) - queue_depth = dev->host->cmd_per_lun; - - if (goal_tags) { - scsi_set_tag_type(dev, MSG_ORDERED_TAG); - scsi_activate_tcq(dev, queue_depth); - } else { - scsi_deactivate_tcq(dev, queue_depth); - } tp->flags |= ESP_TGT_DISCONNECT; if (!spi_initial_dv(dev->sdev_target)) @@ -2451,19 +2515,20 @@ static int esp_eh_abort_handler(struct scsi_cmnd *cmd) * XXX much for the final driver. 
*/ spin_lock_irqsave(esp->host->host_lock, flags); - printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n", - esp->host->unique_id, cmd, cmd->cmnd[0]); + shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n", + cmd, cmd->cmnd[0]); ent = esp->active_cmd; if (ent) - printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n", - esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); + shost_printk(KERN_ERR, esp->host, + "Current command [%p:%02x]\n", + ent->cmd, ent->cmd->cmnd[0]); list_for_each_entry(ent, &esp->queued_cmds, list) { - printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n", - esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); + shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n", + ent->cmd, ent->cmd->cmnd[0]); } list_for_each_entry(ent, &esp->active_cmds, list) { - printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n", - esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); + shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n", + ent->cmd, ent->cmd->cmnd[0]); } esp_dump_cmd_log(esp); spin_unlock_irqrestore(esp->host->host_lock, flags); @@ -2631,6 +2696,7 @@ struct scsi_host_template scsi_esp_template = { .use_clustering = ENABLE_CLUSTERING, .max_sectors = 0xffff, .skip_settle_delay = 1, + .use_blk_tags = 1, }; EXPORT_SYMBOL(scsi_esp_template); diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index cd68805e8d78..84dcbe4a6268 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -1,4 +1,4 @@ -/* esp_scsi.h: Defines and structures for the ESP drier. +/* esp_scsi.h: Defines and structures for the ESP driver. * * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ @@ -25,6 +25,7 @@ #define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ #define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ #define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ +#define ESP_CFG4 0x0dUL /* rw Fourth cfg register 0x34 */ #define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ @@ -76,6 +77,18 @@ #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ +/* ESP config register 4 read-write, found only on am53c974 chips */ +#define ESP_CONFIG4_RADE 0x04 /* Active negation */ +#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */ +#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */ +#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */ +#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */ + +#define ESP_CONFIG_GE_12NS (0) +#define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1) +#define ESP_CONFIG_GE_35NS (ESP_CONFIG_GE0) +#define ESP_CONFIG_GE_0NS (ESP_CONFIG_GE0 | ESP_CONFIG_GE1) + /* ESP command register read-write */ /* Group 1 commands: These may be sent at any point in time to the ESP * chip. 
None of them can generate interrupts 'cept @@ -254,6 +267,7 @@ enum esp_rev { FAS100A = 0x04, FAST = 0x05, FASHME = 0x06, + PCSCSI = 0x07, /* AM53c974 */ }; struct esp_cmd_entry { @@ -269,6 +283,7 @@ struct esp_cmd_entry { #define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ #define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ +#define ESP_CMD_FLAG_RESIDUAL 0x08 /* AM53c974 BLAST residual */ u8 tag[2]; u8 orig_tag[2]; @@ -283,7 +298,6 @@ struct esp_cmd_entry { struct completion *eh_done; }; -/* XXX make this configurable somehow XXX */ #define ESP_DEFAULT_TAGS 16 #define ESP_MAX_TARGET 16 @@ -445,7 +459,7 @@ struct esp { u8 prev_soff; u8 prev_stp; u8 prev_cfg3; - u8 __pad; + u8 num_tags; struct list_head esp_cmd_pool; @@ -466,6 +480,7 @@ struct esp { u8 bursts; u8 config1; u8 config2; + u8 config4; u8 scsi_id; u32 scsi_id_mask; @@ -479,6 +494,7 @@ struct esp { #define ESP_FLAG_WIDE_CAPABLE 0x00000008 #define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 #define ESP_FLAG_DISABLE_SYNC 0x00000020 +#define ESP_FLAG_USE_FIFO 0x00000040 u8 select_state; #define ESP_SELECT_NONE 0x00 /* Not selecting */ diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 4a8ac7d8c76b..308a016fdaea 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -280,14 +280,16 @@ static struct scsi_host_template fcoe_shost_template = { .eh_device_reset_handler = fc_eh_device_reset, .eh_host_reset_handler = fc_eh_host_reset, .slave_alloc = fc_slave_alloc, - .change_queue_depth = fc_change_queue_depth, - .change_queue_type = fc_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, .this_id = -1, .cmd_per_lun = 3, .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = SG_ALL, .max_sectors = 0xffff, + .use_blk_tags = 1, + .track_queue_depth = 1, }; /** diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index bf8d34c26f13..3b73b96619e2 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -39,7 +39,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.11" +#define DRV_VERSION "1.6.0.16" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index f3984b48f8e9..bf0bbd42efb5 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -135,6 +135,11 @@ void fnic_handle_link(struct work_struct *work) fnic->lport->host->host_no, FNIC_FC_LE, "Link Status: UP_DOWN", strlen("Link Status: UP_DOWN")); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "deleting fip-timer during link-down\n"); + del_timer_sync(&fnic->fip_timer); + } fcoe_ctlr_link_down(&fnic->ctlr); } diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 8c56fdc3a456..0c1f8177b5b7 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -95,12 +95,10 @@ static int fnic_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - sdev->tagged_supported = 1; - if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - scsi_activate_tcq(sdev, fnic_max_qdepth); + scsi_change_queue_depth(sdev, fnic_max_qdepth); return 0; } @@ -112,8 +110,8 @@ static struct scsi_host_template fnic_host_template = { .eh_device_reset_handler = fnic_device_reset, .eh_host_reset_handler = 
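The fcoe template hunk (and, below, the fnic and earlier esas2r ones) shows the other half of the queue-depth rework: instead of wrapping the old transport helpers, a host template can now point .change_queue_depth and .change_queue_type straight at the core functions and opt in to block-layer tag management. Roughly:

static struct scsi_host_template my_template = {
    /* ... the usual fields ... */
    .change_queue_depth = scsi_change_queue_depth,
    .change_queue_type = scsi_change_queue_type,
    .use_blk_tags = 1,          /* tags come from the block layer */
    .track_queue_depth = 1,     /* core tracks per-LUN queue depth */
};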
fnic_host_reset, .slave_alloc = fnic_slave_alloc, - .change_queue_depth = fc_change_queue_depth, - .change_queue_type = fc_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, .this_id = -1, .cmd_per_lun = 3, .can_queue = FNIC_DFLT_IO_REQ, @@ -121,6 +119,8 @@ static struct scsi_host_template fnic_host_template = { .sg_tablesize = FNIC_MAX_SG_DESC_CNT, .max_sectors = 0xffff, .shost_attrs = fnic_attrs, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static void @@ -438,21 +438,30 @@ static int fnic_dev_wait(struct vnic_dev *vdev, unsigned long time; int done; int err; + int count; + + count = 0; err = start(vdev, arg); if (err) return err; - /* Wait for func to complete...2 seconds max */ + /* Wait for func to complete. + * Sometime schedule_timeout_uninterruptible take long time + * to wake up so we do not retry as we are only waiting for + * 2 seconds in while loop. By adding count, we make sure + * we try atleast three times before returning -ETIMEDOUT + */ time = jiffies + (HZ * 2); do { err = finished(vdev, &done); + count++; if (err) return err; if (done) return 0; schedule_timeout_uninterruptible(HZ / 10); - } while (time_after(time, jiffies)); + } while (time_after(time, jiffies) || (count < 3)); return -ETIMEDOUT; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 961bdf5d31cd..2097de42a147 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -325,13 +325,11 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, struct fc_rport_libfc_priv *rp = rport->dd_data; struct host_sg_desc *desc; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; - u8 pri_tag = 0; unsigned int i; unsigned long intr_flags; int flags; u8 exch_flags; struct scsi_lun fc_lun; - char msg[2]; if (sg_count) { /* For each SGE, create a device desc entry */ @@ -357,12 +355,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, int_to_scsilun(sc->device->lun, &fc_lun); - pri_tag = FCPIO_ICMND_PTA_SIMPLE; - msg[0] = MSG_SIMPLE_TAG; - scsi_populate_tag_msg(sc, msg); - if (msg[0] == MSG_ORDERED_TAG) - pri_tag = FCPIO_ICMND_PTA_ORDERED; - /* Enqueue the descriptor in the Copy WQ */ spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags); @@ -394,7 +386,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, io_req->sgl_list_pa, io_req->sense_buf_pa, 0, /* scsi cmd ref, always 0 */ - pri_tag, /* scsi pri and tag */ + FCPIO_ICMND_PTA_SIMPLE, + /* scsi pri and tag */ flags, /* command flags */ sc->cmnd, sc->cmd_len, scsi_bufflen(sc), @@ -428,8 +421,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ int ret; u64 cmd_trace; int sg_count = 0; - unsigned long flags; + unsigned long flags = 0; unsigned long ptr; + struct fc_rport_priv *rdata; + spinlock_t *io_lock = NULL; if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; @@ -443,6 +438,16 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ return 0; } + rdata = lp->tt.rport_lookup(lp, rport->port_id); + if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "returning IO as rport is removed\n"); + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + sc->result = DID_NO_CONNECT; + done(sc); + return 0; + } + if (lp->state != LPORT_ST_READY || !(lp->link_up)) return SCSI_MLQUEUE_HOST_BUSY; @@ -505,6 +510,13 @@ static int fnic_queuecommand_lck(struct 
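The fnic_dev_wait() hunk above adds a minimum attempt count to the poll loop: if schedule_timeout_uninterruptible() oversleeps past the two-second budget, the loop still polls at least three times before reporting -ETIMEDOUT. The pattern in isolation, with a hypothetical completion callback:

static int wait_for_done(struct vnic_dev *vdev,
                         int (*finished)(struct vnic_dev *, int *))
{
    unsigned long deadline = jiffies + 2 * HZ;  /* soft time budget */
    int tries = 0;
    int done = 0;
    int err;

    do {
        err = finished(vdev, &done);
        tries++;
        if (err)
            return err;
        if (done)
            return 0;
        schedule_timeout_uninterruptible(HZ / 10);
    } while (time_after(deadline, jiffies) || tries < 3);

    return -ETIMEDOUT;
}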
scsi_cmnd *sc, void (*done)(struct scsi_ } } + /* + * Will acquire lock defore setting to IO initialized. + */ + + io_lock = fnic_io_lock_hash(fnic, sc); + spin_lock_irqsave(io_lock, flags); + /* initialize rest of io_req */ io_req->port_id = rport->port_id; io_req->start_time = jiffies; @@ -521,11 +533,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ * In case another thread cancelled the request, * refetch the pointer under the lock. */ - spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc); FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, sc->request->tag, sc, 0, 0, 0, (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); - spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); CMD_SP(sc) = NULL; CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; @@ -534,6 +544,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); } + atomic_dec(&fnic->in_flight); + /* acquire host lock before returning to SCSI */ + spin_lock(lp->host->host_lock); + return ret; } else { atomic64_inc(&fnic_stats->io_stats.active_ios); atomic64_inc(&fnic_stats->io_stats.num_ios); @@ -555,6 +569,11 @@ out: sc->request->tag, sc, io_req, sg_count, cmd_trace, (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); + + /* if only we issued IO, will we have the io lock */ + if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED) + spin_unlock_irqrestore(io_lock, flags); + atomic_dec(&fnic->in_flight); /* acquire host lock before returning to SCSI */ spin_lock(lp->host->host_lock); diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index acf1f95cb5c5..65a9bde26974 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -624,12 +624,12 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, if (frame_type == FNIC_FC_RECV) { eth_fcoe_hdr_len = sizeof(struct ethhdr) + sizeof(struct fcoe_hdr); - fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len; memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); /* Copy the rest of data frame */ memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, min_t(u8, fc_trc_frame_len, - (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE + - eth_fcoe_hdr_len))); } else { memcpy((char *)fc_trace, (void *)frame, min_t(u8, fc_trc_frame_len, diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index b331272e93bc..f35792f7051c 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -18,20 +18,6 @@ * * Added ISAPNP support for DTC436 adapters, * Thomas Sailer, sailer@ife.ee.ethz.ch - * - * ALPHA RELEASE 1. - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ /* @@ -40,14 +26,6 @@ */ /* - * Options : - * - * PARITY - enable parity checking. Not supported. - * - * SCSI2 - enable support for SCSI-II tagged queueing. Untested. - * - * USLEEP - enable support for devices that don't disconnect. Untested. - * * The card is detected and initialized in one of several ways : * 1. With command line overrides - NCR5380=port,irq may be * used on the LILO command line to override the defaults. 
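Earlier in the fnic_queuecommand_lck() changes, the driver also starts refusing I/O once libfc has marked the remote port for deletion, completing the command back to the midlayer immediately rather than letting it time out on the wire. The usual fast-fail idiom looks like this (the host byte of scsi_cmnd::result sits in bits 16-23, hence the shift):

    struct fc_rport_priv *rdata = lp->tt.rport_lookup(lp, rport->port_id);

    if (!rdata || rdata->rp_state == RPORT_ST_DELETE) {
        /* Remote port is being torn down: fail fast, do not queue. */
        sc->result = DID_NO_CONNECT << 16;
        done(sc);
        return 0;
    }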
@@ -79,27 +57,21 @@ */ /* settings for DTC3181E card with only Mustek scanner attached */ -#define USLEEP #define USLEEP_POLL 1 #define USLEEP_SLEEP 20 #define USLEEP_WAITLONG 500 #define AUTOPROBE_IRQ -#define AUTOSENSE - #ifdef CONFIG_SCSI_GENERIC_NCR53C400 #define NCR53C400_PSEUDO_DMA 1 #define PSEUDO_DMA #define NCR53C400 -#define NCR5380_STATS -#undef NCR5380_STAT_LIMIT #endif #include <asm/io.h> #include <linux/signal.h> #include <linux/blkdev.h> -#include "scsi.h" #include <scsi/scsi_host.h> #include "g_NCR5380.h" #include "NCR5380.h" @@ -277,7 +249,7 @@ static int __init do_DTC3181E_setup(char *str) * Locks: none */ -int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) +static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt) { static int current_override = 0; int count; @@ -335,7 +307,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) if (pnp_irq_valid(dev, 0)) overrides[count].irq = pnp_irq(dev, 0); else - overrides[count].irq = SCSI_IRQ_NONE; + overrides[count].irq = NO_IRQ; if (pnp_dma_valid(dev, 0)) overrides[count].dma = pnp_dma(dev, 0); else @@ -455,27 +427,22 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) else instance->irq = NCR5380_probe_irq(instance, 0xffff); - if (instance->irq != SCSI_IRQ_NONE) + /* Compatibility with documented NCR5380 kernel parameters */ + if (instance->irq == 255) + instance->irq = NO_IRQ; + + if (instance->irq != NO_IRQ) if (request_irq(instance->irq, generic_NCR5380_intr, 0, "NCR5380", instance)) { printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; + instance->irq = NO_IRQ; } - if (instance->irq == SCSI_IRQ_NONE) { + if (instance->irq == NO_IRQ) { printk(KERN_INFO "scsi%d : interrupts not enabled. 
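The detect-routine hunks here (and the dtc and dmx3191d ones earlier) retire the driver-private SCSI_IRQ_NONE value in favour of the generic NO_IRQ, keeping the long-standing behaviour of dropping to polled operation instead of failing the probe when the interrupt line cannot be claimed. The fallback, schematically (handler and name string are placeholders):

    if (instance->irq != NO_IRQ &&
        request_irq(instance->irq, my_intr, 0, "mycard", instance)) {
        printk(KERN_WARNING "scsi%d: IRQ%d not free, using polled mode\n",
               instance->host_no, instance->irq);
        instance->irq = NO_IRQ;     /* from here on the driver polls */
    }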
for better interactive performance,\n", instance->host_no); printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } - printk(KERN_INFO "scsi%d : at " STRVAL(NCR5380_map_name) " 0x%x", instance->host_no, (unsigned int) instance->NCR5380_instance_name); - if (instance->irq == SCSI_IRQ_NONE) - printk(" interrupts disabled"); - else - printk(" irq %d", instance->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, GENERIC_NCR5380_PUBLIC_RELEASE); - NCR5380_print_options(instance); - printk("\n"); - ++current_override; ++count; } @@ -483,19 +450,6 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt) } /** - * generic_NCR5380_info - reporting string - * @host: NCR5380 to report on - * - * Report driver information for the NCR5380 - */ - -const char *generic_NCR5380_info(struct Scsi_Host *host) -{ - static const char string[] = "Generic NCR5380/53C400 Driver"; - return string; -} - -/** * generic_NCR5380_release_resources - free resources * @instance: host adapter to clean up * @@ -504,12 +458,12 @@ const char *generic_NCR5380_info(struct Scsi_Host *host) * Locks: none */ -int generic_NCR5380_release_resources(struct Scsi_Host *instance) +static int generic_NCR5380_release_resources(struct Scsi_Host *instance) { NCR5380_local_declare(); NCR5380_setup(instance); - if (instance->irq != SCSI_IRQ_NONE) + if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); @@ -741,163 +695,9 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, #include "NCR5380.c" -#define PRINTP(x) seq_printf(m, x) -#define ANDP , - -static void sprint_opcode(struct seq_file *m, int opcode) -{ - PRINTP("0x%02x " ANDP opcode); -} - -static void sprint_command(struct seq_file *m, unsigned char *command) -{ - int i, s; - sprint_opcode(m, command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - PRINTP("%02x " ANDP command[i]); - PRINTP("\n"); -} - -/** - * sprintf_Scsi_Cmnd - print a scsi command - * @m: seq_fil to print into - * @cmd: SCSI command block - * - * Print out the target and command data in hex - */ - -static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd) -{ - PRINTP("host number %d destination target %d, lun %llu\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun); - PRINTP(" command = "); - sprint_command(m, cmd->cmnd); -} - -/** - * generic_NCR5380_proc_info - /proc for NCR5380 driver - * @buffer: buffer to print into - * @start: start position - * @offset: offset into buffer - * @len: length - * @hostno: instance to affect - * @inout: read/write - * - * Provide the procfs information for the 5380 controller. 
We fill - * this with useful debugging information including the commands - * being executed, disconnected command queue and the statistical - * data - * - * Locks: global cli/lock for queue walk - */ - -static int generic_NCR5380_show_info(struct seq_file *m, struct Scsi_Host *scsi_ptr) -{ - NCR5380_local_declare(); - unsigned long flags; - unsigned char status; - int i; - Scsi_Cmnd *ptr; - struct NCR5380_hostdata *hostdata; -#ifdef NCR5380_STATS - struct scsi_device *dev; -#endif - - NCR5380_setup(scsi_ptr); - hostdata = (struct NCR5380_hostdata *) scsi_ptr->hostdata; - - spin_lock_irqsave(scsi_ptr->host_lock, flags); - PRINTP("SCSI host number %d : %s\n" ANDP scsi_ptr->host_no ANDP scsi_ptr->hostt->name); - PRINTP("Generic NCR5380 driver version %d\n" ANDP GENERIC_NCR5380_PUBLIC_RELEASE); - PRINTP("NCR5380 core version %d\n" ANDP NCR5380_PUBLIC_RELEASE); -#ifdef NCR53C400 - PRINTP("NCR53C400 extension version %d\n" ANDP NCR53C400_PUBLIC_RELEASE); - PRINTP("NCR53C400 card%s detected\n" ANDP(((struct NCR5380_hostdata *) scsi_ptr->hostdata)->flags & FLAG_NCR53C400) ? "" : " not"); -# if NCR53C400_PSEUDO_DMA - PRINTP("NCR53C400 pseudo DMA used\n"); -# endif -#else - PRINTP("NO NCR53C400 driver extensions\n"); -#endif - PRINTP("Using %s mapping at %s 0x%lx, " ANDP STRVAL(NCR5380_map_config) ANDP STRVAL(NCR5380_map_name) ANDP scsi_ptr->NCR5380_instance_name); - if (scsi_ptr->irq == SCSI_IRQ_NONE) - PRINTP("no interrupt\n"); - else - PRINTP("on interrupt %d\n" ANDP scsi_ptr->irq); - -#ifdef NCR5380_STATS - if (hostdata->connected || hostdata->issue_queue || hostdata->disconnected_queue) - PRINTP("There are commands pending, transfer rates may be crud\n"); - if (hostdata->pendingr) - PRINTP(" %d pending reads" ANDP hostdata->pendingr); - if (hostdata->pendingw) - PRINTP(" %d pending writes" ANDP hostdata->pendingw); - if (hostdata->pendingr || hostdata->pendingw) - PRINTP("\n"); - shost_for_each_device(dev, scsi_ptr) { - unsigned long br = hostdata->bytes_read[dev->id]; - unsigned long bw = hostdata->bytes_write[dev->id]; - long tr = hostdata->time_read[dev->id] / HZ; - long tw = hostdata->time_write[dev->id] / HZ; - - PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type)); - for (i = 0; i < 8; i++) - if (dev->vendor[i] >= 0x20) - seq_putc(m, dev->vendor[i]); - seq_putc(m, ' '); - for (i = 0; i < 16; i++) - if (dev->model[i] >= 0x20) - seq_putc(m, dev->model[i]); - seq_putc(m, ' '); - for (i = 0; i < 4; i++) - if (dev->rev[i] >= 0x20) - seq_putc(m, dev->rev[i]); - seq_putc(m, ' '); - - PRINTP("\n%10ld kb read in %5ld secs" ANDP br / 1024 ANDP tr); - if (tr) - PRINTP(" @ %5ld bps" ANDP br / tr); - - PRINTP("\n%10ld kb written in %5ld secs" ANDP bw / 1024 ANDP tw); - if (tw) - PRINTP(" @ %5ld bps" ANDP bw / tw); - PRINTP("\n"); - } -#endif - - status = NCR5380_read(STATUS_REG); - if (!(status & SR_REQ)) - PRINTP("REQ not asserted, phase unknown.\n"); - else { - for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i); - PRINTP("Phase %s\n" ANDP phases[i].name); - } - - if (!hostdata->connected) { - PRINTP("No currently connected command\n"); - } else { - sprint_Scsi_Cmnd(m, (Scsi_Cmnd *) hostdata->connected); - } - - PRINTP("issue_queue\n"); - - for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) - sprint_Scsi_Cmnd(m, ptr); - - PRINTP("disconnected_queue\n"); - - for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) - sprint_Scsi_Cmnd(m, ptr); - - 
spin_unlock_irqrestore(scsi_ptr->host_lock, flags); - return 0; -} - -#undef PRINTP -#undef ANDP - static struct scsi_host_template driver_template = { .show_info = generic_NCR5380_show_info, - .name = "Generic NCR5380/NCR53C400 Scsi Driver", + .name = "Generic NCR5380/NCR53C400 SCSI", .detect = generic_NCR5380_detect, .release = generic_NCR5380_release_resources, .info = generic_NCR5380_info, diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 703adf78e0b2..bea1a3b9b862 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -9,28 +9,11 @@ * * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin * K.Lentin@cs.monash.edu.au - * - * ALPHA RELEASE 1. - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ #ifndef GENERIC_NCR5380_H #define GENERIC_NCR5380_H - -#define GENERIC_NCR5380_PUBLIC_RELEASE 1 - #ifdef NCR53C400 #define BIOSPARAM #define NCR5380_BIOSPARAM generic_NCR5380_biosparam @@ -39,12 +22,6 @@ #endif #ifndef ASM -static int generic_NCR5380_abort(Scsi_Cmnd *); -static int generic_NCR5380_detect(struct scsi_host_template *); -static int generic_NCR5380_release_resources(struct Scsi_Host *); -static int generic_NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); -static int generic_NCR5380_bus_reset(Scsi_Cmnd *); -static const char* generic_NCR5380_info(struct Scsi_Host *); #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 @@ -118,7 +95,8 @@ static const char* generic_NCR5380_info(struct Scsi_Host *); #define NCR5380_bus_reset generic_NCR5380_bus_reset #define NCR5380_pread generic_NCR5380_pread #define NCR5380_pwrite generic_NCR5380_pwrite -#define NCR5380_proc_info notyet_generic_proc_info +#define NCR5380_info generic_NCR5380_info +#define NCR5380_show_info generic_NCR5380_show_info #define BOARD_NCR5380 0 #define BOARD_NCR53C400 1 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 0f1ae13ce7c7..71e138044379 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -2159,7 +2159,7 @@ static void gdth_next(gdth_ha_str *ha) case VERIFY: case START_STOP: case MODE_SENSE: - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], nscp->cmnd[4],nscp->cmnd[5])); @@ -2391,7 +2391,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data)); break; - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 && (ha->cache_feat & GDT_64BIT)) { gdth_rdcap16_data rdc16; @@ -4661,7 +4661,6 @@ static void gdth_flush(gdth_ha_str *ha) /* configure lun */ static int gdth_slave_configure(struct scsi_device *sdev) { - scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); sdev->skip_ms_page_3f = 1; sdev->skip_ms_page_8 = 1; return 0; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 6de80e352871..8bb173e01084 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -418,7 +418,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->cmd_per_lun = sht->cmd_per_lun; shost->unchecked_isa_dma = sht->unchecked_isa_dma; shost->use_clustering = sht->use_clustering; - shost->ordered_tag = sht->ordered_tag; shost->no_write_same = sht->no_write_same; if (shost_eh_deadline == -1 || 
!sht->eh_host_reset_handler) @@ -485,8 +484,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) WQ_UNBOUND | WQ_MEM_RECLAIM, 1, shost->host_no); if (!shost->tmf_work_q) { - printk(KERN_WARNING "scsi%d: failed to create tmf workq\n", - shost->host_no); + shost_printk(KERN_WARNING, shost, + "failed to create tmf workq\n"); goto fail_kthread; } scsi_proc_hostdir_add(shost->hostt); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index cef5d49b59cd..6bb4611b238a 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -48,6 +48,7 @@ #include <linux/bitmap.h> #include <linux/atomic.h> #include <linux/jiffies.h> +#include <linux/percpu-defs.h> #include <linux/percpu.h> #include <asm/div64.h> #include "hpsa_cmd.h" @@ -103,7 +104,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, @@ -149,6 +149,7 @@ static struct board_type products[] = { {0x3249103C, "Smart Array P812", &SA5_access}, {0x324A103C, "Smart Array P712m", &SA5_access}, {0x324B103C, "Smart Array P711m", &SA5_access}, + {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */ {0x3350103C, "Smart Array P222", &SA5_access}, {0x3351103C, "Smart Array P420", &SA5_access}, {0x3352103C, "Smart Array P421", &SA5_access}, @@ -193,12 +194,13 @@ static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); +static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); static void lock_and_start_io(struct ctlr_info *h); static void start_io(struct ctlr_info *h, unsigned long *flags); #ifdef CONFIG_COMPAT -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); +static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, + void __user *arg); #endif static void cmd_free(struct ctlr_info *h, struct CommandList *c); @@ -214,8 +216,6 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static void hpsa_scan_start(struct Scsi_Host *); static int hpsa_scan_finished(struct Scsi_Host *sh, unsigned long elapsed_time); -static int hpsa_change_queue_depth(struct scsi_device *sdev, - int qdepth, int reason); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); @@ -274,12 +274,12 @@ static int check_for_unit_attention(struct ctlr_info *h, "detected, command retried\n", h->ctlr); break; case LUN_FAILED: - dev_warn(&h->pdev->dev, HPSA "%d: LUN failure " - "detected, action required\n", h->ctlr); + dev_warn(&h->pdev->dev, + HPSA "%d: LUN failure detected\n", h->ctlr); break; case REPORT_LUNS_CHANGED: - dev_warn(&h->pdev->dev, HPSA "%d: report LUN data " - "changed, action required\n", h->ctlr); + dev_warn(&h->pdev->dev, + HPSA "%d: report LUN data changed\n", h->ctlr); /* * Note: this REPORT_LUNS_CHANGED condition only occurs on the external * target (array) devices. 
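A pattern that recurs across the drivers in this series: per-driver ->change_queue_depth hooks lose the old "reason" argument and stop calling scsi_adjust_queue_depth() themselves, either clamping the requested depth and delegating to the new midlayer helper scsi_change_queue_depth(), or, as the hpsa hunks around this point do, dropping the hook entirely and wiring the host template straight to the helper. A minimal sketch of the post-conversion callback shape, using the hypothetical names my_change_queue_depth and MY_MAX_CMDS_PER_LUN rather than any driver in this patch:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#define MY_MAX_CMDS_PER_LUN 32	/* hypothetical per-LUN limit for illustration */

/* LLDD callback after the conversion: no "reason" argument and no direct
 * manipulation of sdev->queue_depth; the midlayer helper applies the new
 * depth and returns the value actually set. */
static int my_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > MY_MAX_CMDS_PER_LUN)
		qdepth = MY_MAX_CMDS_PER_LUN;
	return scsi_change_queue_depth(sdev, qdepth);
}

Because scsi_change_queue_depth() returns the depth it set, the callback can return its result unchanged, which is exactly what the hptiop and ibmvscsi hunks later in this patch do; drivers with no limit of their own simply point .change_queue_depth at scsi_change_queue_depth, as the hpsa host template hunk just below does.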
@@ -392,7 +392,8 @@ static ssize_t host_show_commands_outstanding(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct ctlr_info *h = shost_to_hba(shost); - return snprintf(buf, 20, "%d\n", h->commands_outstanding); + return snprintf(buf, 20, "%d\n", + atomic_read(&h->commands_outstanding)); } static ssize_t host_show_transport_mode(struct device *dev, @@ -670,7 +671,7 @@ static struct scsi_host_template hpsa_driver_template = { .queuecommand = hpsa_scsi_queue_command, .scan_start = hpsa_scan_start, .scan_finished = hpsa_scan_finished, - .change_queue_depth = hpsa_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .use_clustering = ENABLE_CLUSTERING, .eh_abort_handler = hpsa_eh_abort_handler, @@ -698,7 +699,6 @@ static inline u32 next_command(struct ctlr_info *h, u8 q) { u32 a; struct reply_queue_buffer *rq = &h->reply_queue[q]; - unsigned long flags; if (h->transMethod & CFGTBL_Trans_io_accel1) return h->access.command_completed(h, q); @@ -709,9 +709,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q) if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { a = rq->head[rq->current_entry]; rq->current_entry++; - spin_lock_irqsave(&h->lock, flags); - h->commands_outstanding--; - spin_unlock_irqrestore(&h->lock, flags); + atomic_dec(&h->commands_outstanding); } else { a = FIFO_EMPTY; } @@ -1500,22 +1498,22 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h, { struct SGDescriptor *chain_sg, *chain_block; u64 temp64; + u32 chain_len; chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; chain_block = h->cmd_sg_list[c->cmdindex]; - chain_sg->Ext = HPSA_SG_CHAIN; - chain_sg->Len = sizeof(*chain_sg) * + chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); + chain_len = sizeof(*chain_sg) * (c->Header.SGTotal - h->max_cmd_sg_entries); - temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, + chain_sg->Len = cpu_to_le32(chain_len); + temp64 = pci_map_single(h->pdev, chain_block, chain_len, PCI_DMA_TODEVICE); if (dma_mapping_error(&h->pdev->dev, temp64)) { /* prevent subsequent unmapping */ - chain_sg->Addr.lower = 0; - chain_sg->Addr.upper = 0; + chain_sg->Addr = cpu_to_le64(0); return -1; } - chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); - chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); + chain_sg->Addr = cpu_to_le64(temp64); return 0; } @@ -1523,15 +1521,13 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, struct CommandList *c) { struct SGDescriptor *chain_sg; - union u64bit temp64; - if (c->Header.SGTotal <= h->max_cmd_sg_entries) + if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) return; chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; - temp64.val32.lower = chain_sg->Addr.lower; - temp64.val32.upper = chain_sg->Addr.upper; - pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); + pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), + le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); } @@ -1732,8 +1728,7 @@ static void complete_scsi_command(struct CommandList *cp) struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; - cp->Header.Tag.lower = c->Tag.lower; - cp->Header.Tag.upper = c->Tag.upper; + cp->Header.tag = c->tag; memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); @@ -1763,72 +1758,13 @@ static void complete_scsi_command(struct CommandList *cp) /* Get addition sense code 
qualifier */ ascq = ei->SenseInfo[13]; } - if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { - if (check_for_unit_attention(h, cp)) - break; - if (sense_key == ILLEGAL_REQUEST) { - /* - * SCSI REPORT_LUNS is commonly unsupported on - * Smart Array. Suppress noisy complaint. - */ - if (cp->Request.CDB[0] == REPORT_LUNS) - break; - - /* If ASC/ASCQ indicate Logical Unit - * Not Supported condition, - */ - if ((asc == 0x25) && (ascq == 0x0)) { - dev_warn(&h->pdev->dev, "cp %p " - "has check condition\n", cp); - break; - } - } - - if (sense_key == NOT_READY) { - /* If Sense is Not Ready, Logical Unit - * Not ready, Manual Intervention - * required - */ - if ((asc == 0x04) && (ascq == 0x03)) { - dev_warn(&h->pdev->dev, "cp %p " - "has check condition: unit " - "not ready, manual " - "intervention required\n", cp); - break; - } - } if (sense_key == ABORTED_COMMAND) { - /* Aborted command is retryable */ - dev_warn(&h->pdev->dev, "cp %p " - "has check condition: aborted command: " - "ASC: 0x%x, ASCQ: 0x%x\n", - cp, asc, ascq); cmd->result |= DID_SOFT_ERROR << 16; break; } - /* Must be some other type of check condition */ - dev_dbg(&h->pdev->dev, "cp %p has check condition: " - "unknown type: " - "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " - "Returning result: 0x%x, " - "cmd=[%02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x %02x " - "%02x %02x %02x %02x %02x]\n", - cp, sense_key, asc, ascq, - cmd->result, - cmd->cmnd[0], cmd->cmnd[1], - cmd->cmnd[2], cmd->cmnd[3], - cmd->cmnd[4], cmd->cmnd[5], - cmd->cmnd[6], cmd->cmnd[7], - cmd->cmnd[8], cmd->cmnd[9], - cmd->cmnd[10], cmd->cmnd[11], - cmd->cmnd[12], cmd->cmnd[13], - cmd->cmnd[14], cmd->cmnd[15]); break; } - - /* Problem was not a check condition * Pass it up to the upper layers... */ @@ -1934,14 +1870,11 @@ static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c, int sg_used, int data_direction) { int i; - union u64bit addr64; - for (i = 0; i < sg_used; i++) { - addr64.val32.lower = c->SG[i].Addr.lower; - addr64.val32.upper = c->SG[i].Addr.upper; - pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, - data_direction); - } + for (i = 0; i < sg_used; i++) + pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), + le32_to_cpu(c->SG[i].Len), + data_direction); } static int hpsa_map_one(struct pci_dev *pdev, @@ -1954,25 +1887,22 @@ static int hpsa_map_one(struct pci_dev *pdev, if (buflen == 0 || data_direction == PCI_DMA_NONE) { cp->Header.SGList = 0; - cp->Header.SGTotal = 0; + cp->Header.SGTotal = cpu_to_le16(0); return 0; } - addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); + addr64 = pci_map_single(pdev, buf, buflen, data_direction); if (dma_mapping_error(&pdev->dev, addr64)) { /* Prevent subsequent unmap of something never mapped */ cp->Header.SGList = 0; - cp->Header.SGTotal = 0; + cp->Header.SGTotal = cpu_to_le16(0); return -1; } - cp->SG[0].Addr.lower = - (u32) (addr64 & (u64) 0x00000000FFFFFFFF); - cp->SG[0].Addr.upper = - (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); - cp->SG[0].Len = buflen; - cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */ - cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ - cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ + cp->SG[0].Addr = cpu_to_le64(addr64); + cp->SG[0].Len = cpu_to_le32(buflen); + cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ + cp->Header.SGList = 1; /* no. 
SGs contig in this cmd */ + cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ return 0; } @@ -2830,8 +2760,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, if (d == NULL) return 0; /* no match */ - it_nexus = cpu_to_le32((u32) d->ioaccel_handle); - scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); + it_nexus = cpu_to_le32(d->ioaccel_handle); + scsi_nexus = cpu_to_le32(c2a->scsi_nexus); find = c2a->scsi_nexus; if (h->raid_offload_debug > 0) @@ -2891,7 +2821,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, * Returns 0 on success, -1 otherwise. */ static int hpsa_gather_lun_info(struct ctlr_info *h, - int reportlunsize, + int reportphyslunsize, int reportloglunsize, struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, struct ReportLUNdata *logdev, u32 *nlogicals) { @@ -2905,7 +2835,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, *physical_mode = HPSA_REPORT_PHYS_EXTENDED; physical_entry_size = 24; } - if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, + if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize, *physical_mode)) { dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); return -1; @@ -2918,7 +2848,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, *nphysicals - HPSA_MAX_PHYS_LUN); *nphysicals = HPSA_MAX_PHYS_LUN; } - if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { + if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) { dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); return -1; } @@ -2941,8 +2871,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, return 0; } -u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, - int nphysicals, int nlogicals, +static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, + int i, int nphysicals, int nlogicals, struct ReportExtendedLUNdata *physdev_list, struct ReportLUNdata *logdev_list) { @@ -3011,15 +2941,14 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) u32 ndev_allocated = 0; struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; int ncurrent = 0; - int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; int i, n_ext_target_devs, ndevs_to_allocate; int raid_ctlr_position; int rescan_hba_mode; DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); - physdev_list = kzalloc(reportlunsize, GFP_KERNEL); - logdev_list = kzalloc(reportlunsize, GFP_KERNEL); + physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); + logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { @@ -3039,7 +2968,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) h->hba_mode_enabled = rescan_hba_mode; - if (hpsa_gather_lun_info(h, reportlunsize, + if (hpsa_gather_lun_info(h, + sizeof(*physdev_list), sizeof(*logdev_list), (struct ReportLUNdata *) physdev_list, &nphysicals, &physical_mode, logdev_list, &nlogicals)) goto out; @@ -3210,19 +3140,19 @@ static int hpsa_scatter_gather(struct ctlr_info *h, } addr64 = (u64) sg_dma_address(sg); len = sg_dma_len(sg); - curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); - curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); - curr_sg->Len = len; - curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 
0 : HPSA_SG_LAST; + curr_sg->Addr = cpu_to_le64(addr64); + curr_sg->Len = cpu_to_le32(len); + curr_sg->Ext = cpu_to_le32(0); curr_sg++; } + (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); if (use_sg + chained > h->maxSG) h->maxSG = use_sg + chained; if (chained) { cp->Header.SGList = h->max_cmd_sg_entries; - cp->Header.SGTotal = (u16) (use_sg + 1); + cp->Header.SGTotal = cpu_to_le16(use_sg + 1); if (hpsa_map_sg_chain_block(h, cp)) { scsi_dma_unmap(cmd); return -1; @@ -3233,7 +3163,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, sglist_finished: cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ - cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ + cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */ return 0; } @@ -3325,17 +3255,12 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, addr64 = (u64) sg_dma_address(sg); len = sg_dma_len(sg); total_len += len; - curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); - curr_sg->Addr.upper = - (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); - curr_sg->Len = len; - - if (i == (scsi_sg_count(cmd) - 1)) - curr_sg->Ext = HPSA_SG_LAST; - else - curr_sg->Ext = 0; /* we are not chaining */ + curr_sg->Addr = cpu_to_le64(addr64); + curr_sg->Len = cpu_to_le32(len); + curr_sg->Ext = cpu_to_le32(0); curr_sg++; } + (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: @@ -3592,7 +3517,7 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, cp->data_len = cpu_to_le32(total_len); cp->err_ptr = cpu_to_le64(c->busaddr + offsetof(struct io_accel2_cmd, error_data)); - cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); + cp->err_len = cpu_to_le32(sizeof(cp->error_data)); enqueue_cmd_and_start_io(h, c); return 0; @@ -3809,11 +3734,6 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, offload_to_mirror = (offload_to_mirror >= map->layout_map_count - 1) ? 0 : offload_to_mirror + 1; - /* FIXME: remove after debug/dev */ - BUG_ON(offload_to_mirror >= map->layout_map_count); - dev_warn(&h->pdev->dev, - "DEBUG: Using physical disk map index %d from mirror group %d\n", - map_index, offload_to_mirror); dev->offload_to_mirror = offload_to_mirror; /* Avoid direct use of dev->offload_to_mirror within this * function since multiple threads might simultaneously @@ -3959,8 +3879,11 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, dev->scsi3addr); } -static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +/* + * Running in struct Scsi_Host->host_lock less mode using LLD internal + * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection. 
+ */ +static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; @@ -3973,14 +3896,14 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, dev = cmd->device->hostdata; if (!dev) { cmd->result = DID_NO_CONNECT << 16; - done(cmd); + cmd->scsi_done(cmd); return 0; } memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); if (unlikely(lockup_detected(h))) { cmd->result = DID_ERROR << 16; - done(cmd); + cmd->scsi_done(cmd); return 0; } c = cmd_alloc(h); @@ -3990,9 +3913,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, } /* Fill in the command list header */ - - cmd->scsi_done = done; /* save this for use by completion code */ - /* save c in case we have to abort it */ cmd->host_scribble = (unsigned char *) c; @@ -4026,8 +3946,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, c->Header.ReplyQueue = 0; /* unused in simple mode */ memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); - c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); - c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; + c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) | + DIRECT_LOOKUP_BIT); /* Fill in the request block... */ @@ -4036,17 +3956,18 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); c->Request.CDBLen = cmd->cmd_len; memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); - c->Request.Type.Type = TYPE_CMD; - c->Request.Type.Attribute = ATTR_SIMPLE; switch (cmd->sc_data_direction) { case DMA_TO_DEVICE: - c->Request.Type.Direction = XFER_WRITE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE); break; case DMA_FROM_DEVICE: - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ); break; case DMA_NONE: - c->Request.Type.Direction = XFER_NONE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE); break; case DMA_BIDIRECTIONAL: /* This can happen if a buggy application does a scsi passthru @@ -4054,7 +3975,8 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) */ - c->Request.Type.Direction = XFER_RSVD; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD); /* This is technically wrong, and hpsa controllers should * reject it with CMD_INVALID, which is the most correct * response, but non-fibre backends appear to let it @@ -4081,8 +4003,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, return 0; } -static DEF_SCSI_QCMD(hpsa_scsi_queue_command) - static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) { unsigned long flags; @@ -4152,23 +4072,6 @@ static int hpsa_scan_finished(struct Scsi_Host *sh, return finished; } -static int hpsa_change_queue_depth(struct scsi_device *sdev, - int qdepth, int reason) -{ - struct ctlr_info *h = sdev_to_hba(sdev); - - if (reason != SCSI_QDEPTH_DEFAULT) - return -ENOTSUPP; - - if (qdepth < 1) - qdepth = 1; - else - if (qdepth > h->nr_cmds) - qdepth = h->nr_cmds; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - return sdev->queue_depth; -} - static void hpsa_unregister_scsi(struct ctlr_info *h) { /* we are being forcibly unloaded, and may not refuse. 
*/ @@ -4329,8 +4232,8 @@ static void hpsa_get_tag(struct ctlr_info *h, if (c->cmd_type == CMD_IOACCEL1) { struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) &h->ioaccel_cmd_pool[c->cmdindex]; - *tagupper = cm1->Tag.upper; - *taglower = cm1->Tag.lower; + *tagupper = (u32) (cm1->tag >> 32); + *taglower = (u32) (cm1->tag & 0x0ffffffffULL); return; } if (c->cmd_type == CMD_IOACCEL2) { @@ -4341,11 +4244,10 @@ static void hpsa_get_tag(struct ctlr_info *h, *taglower = cm2->Tag; return; } - *tagupper = c->Header.Tag.upper; - *taglower = c->Header.Tag.lower; + *tagupper = (u32) (c->Header.tag >> 32); + *taglower = (u32) (c->Header.tag & 0x0ffffffffULL); } - static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, struct CommandList *abort, int swizzle) { @@ -4410,7 +4312,7 @@ static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, struct CommandList *c = NULL; /* ptr into cmpQ */ if (!find) - return 0; + return NULL; spin_lock_irqsave(&h->lock, flags); list_for_each_entry(c, queue_head, list) { if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ @@ -4432,7 +4334,7 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, spin_lock_irqsave(&h->lock, flags); list_for_each_entry(c, queue_head, list) { - if (memcmp(&c->Header.Tag, tag, 8) != 0) + if (memcmp(&c->Header.tag, tag, 8) != 0) continue; spin_unlock_irqrestore(&h->lock, flags); return c; @@ -4686,19 +4588,32 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) int i; union u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; - unsigned long flags; + int loopcount; + + /* There is some *extremely* small but non-zero chance that + * multiple threads could get in here, and one thread could + * be scanning through the list of bits looking for a free + * one, but the free ones are always behind him, and other + * threads sneak in behind him and eat them before he can + * get to them, so that while there is always a free one, a + * very unlucky thread might be starved anyway, never able to + * beat the other threads. In reality, this happens so + * infrequently as to be indistinguishable from never. + */ - spin_lock_irqsave(&h->lock, flags); + loopcount = 0; do { i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); - if (i == h->nr_cmds) { - spin_unlock_irqrestore(&h->lock, flags); - return NULL; - } - } while (test_and_set_bit - (i & (BITS_PER_LONG - 1), - h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); - spin_unlock_irqrestore(&h->lock, flags); + if (i == h->nr_cmds) + i = 0; + loopcount++; + } while (test_and_set_bit(i & (BITS_PER_LONG - 1), + h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 && + loopcount < 10); + + /* Thread got starved? We do not expect this to ever happen. 
*/ + if (loopcount >= 10) + return NULL; c = h->cmd_pool + i; memset(c, 0, sizeof(*c)); @@ -4714,9 +4629,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; temp64.val = (u64) err_dma_handle; - c->ErrDesc.Addr.lower = temp64.val32.lower; - c->ErrDesc.Addr.upper = temp64.val32.upper; - c->ErrDesc.Len = sizeof(*c->err_info); + c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); + c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); c->h = h; return c; @@ -4729,7 +4643,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) static struct CommandList *cmd_special_alloc(struct ctlr_info *h) { struct CommandList *c; - union u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); @@ -4750,10 +4663,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; - temp64.val = (u64) err_dma_handle; - c->ErrDesc.Addr.lower = temp64.val32.lower; - c->ErrDesc.Addr.upper = temp64.val32.upper; - c->ErrDesc.Len = sizeof(*c->err_info); + c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); + c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); c->h = h; return c; @@ -4762,30 +4673,25 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) static void cmd_free(struct ctlr_info *h, struct CommandList *c) { int i; - unsigned long flags; i = c - h->cmd_pool; - spin_lock_irqsave(&h->lock, flags); clear_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); - spin_unlock_irqrestore(&h->lock, flags); } static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) { - union u64bit temp64; - - temp64.val32.lower = c->ErrDesc.Addr.lower; - temp64.val32.upper = c->ErrDesc.Addr.upper; pci_free_consistent(h->pdev, sizeof(*c->err_info), - c->err_info, (dma_addr_t) temp64.val); + c->err_info, + (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr)); pci_free_consistent(h->pdev, sizeof(*c), c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); } #ifdef CONFIG_COMPAT -static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) +static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, + void __user *arg) { IOCTL32_Command_struct __user *arg32 = (IOCTL32_Command_struct __user *) arg; @@ -4810,7 +4716,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) if (err) return -EFAULT; - err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); + err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, @@ -4821,7 +4727,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, - int cmd, void *arg) + int cmd, void __user *arg) { BIG_IOCTL32_Command_struct __user *arg32 = (BIG_IOCTL32_Command_struct __user *) arg; @@ -4848,7 +4754,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, if (err) return -EFAULT; - err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); + err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, @@ -4858,7 +4764,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, return err; } -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) +static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) { switch (cmd) { case CCISS_GETPCIINFO: @@ -4932,7 +4838,7 @@ static 
int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) IOCTL_Command_struct iocommand; struct CommandList *c; char *buff = NULL; - union u64bit temp64; + u64 temp64; int rc = 0; if (!argp) @@ -4971,14 +4877,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->Header.ReplyQueue = 0; /* unused in simple mode */ if (iocommand.buf_size > 0) { /* buffer to fill */ c->Header.SGList = 1; - c->Header.SGTotal = 1; + c->Header.SGTotal = cpu_to_le16(1); } else { /* no buffers to fill */ c->Header.SGList = 0; - c->Header.SGTotal = 0; + c->Header.SGTotal = cpu_to_le16(0); } memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); /* use the kernel address the cmd block for tag */ - c->Header.Tag.lower = c->busaddr; + c->Header.tag = c->busaddr; /* Fill in Request block */ memcpy(&c->Request, &iocommand.Request, @@ -4986,19 +4892,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) /* Fill in the scatter gather information */ if (iocommand.buf_size > 0) { - temp64.val = pci_map_single(h->pdev, buff, + temp64 = pci_map_single(h->pdev, buff, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); - if (dma_mapping_error(&h->pdev->dev, temp64.val)) { - c->SG[0].Addr.lower = 0; - c->SG[0].Addr.upper = 0; - c->SG[0].Len = 0; + if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { + c->SG[0].Addr = cpu_to_le64(0); + c->SG[0].Len = cpu_to_le32(0); rc = -ENOMEM; goto out; } - c->SG[0].Addr.lower = temp64.val32.lower; - c->SG[0].Addr.upper = temp64.val32.upper; - c->SG[0].Len = iocommand.buf_size; - c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/ + c->SG[0].Addr = cpu_to_le64(temp64); + c->SG[0].Len = cpu_to_le32(iocommand.buf_size); + c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ } hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); if (iocommand.buf_size > 0) @@ -5033,7 +4937,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) struct CommandList *c; unsigned char **buff = NULL; int *buff_size = NULL; - union u64bit temp64; + u64 temp64; BYTE sg_used = 0; int status = 0; int i; @@ -5107,29 +5011,30 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; - c->Header.SGList = c->Header.SGTotal = sg_used; + c->Header.SGList = (u8) sg_used; + c->Header.SGTotal = cpu_to_le16(sg_used); memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); - c->Header.Tag.lower = c->busaddr; + c->Header.tag = c->busaddr; memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); if (ioc->buf_size > 0) { int i; for (i = 0; i < sg_used; i++) { - temp64.val = pci_map_single(h->pdev, buff[i], + temp64 = pci_map_single(h->pdev, buff[i], buff_size[i], PCI_DMA_BIDIRECTIONAL); - if (dma_mapping_error(&h->pdev->dev, temp64.val)) { - c->SG[i].Addr.lower = 0; - c->SG[i].Addr.upper = 0; - c->SG[i].Len = 0; + if (dma_mapping_error(&h->pdev->dev, + (dma_addr_t) temp64)) { + c->SG[i].Addr = cpu_to_le64(0); + c->SG[i].Len = cpu_to_le32(0); hpsa_pci_unmap(h->pdev, c, i, PCI_DMA_BIDIRECTIONAL); status = -ENOMEM; goto cleanup0; } - c->SG[i].Addr.lower = temp64.val32.lower; - c->SG[i].Addr.upper = temp64.val32.upper; - c->SG[i].Len = buff_size[i]; - c->SG[i].Ext = i < sg_used - 1 ? 
0 : HPSA_SG_LAST; + c->SG[i].Addr = cpu_to_le64(temp64); + c->SG[i].Len = cpu_to_le32(buff_size[i]); + c->SG[i].Ext = cpu_to_le32(0); } + c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); } hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); if (sg_used) @@ -5206,7 +5111,7 @@ static void decrement_passthru_count(struct ctlr_info *h) /* * ioctl */ -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) +static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) { struct ctlr_info *h; void __user *argp = (void __user *)arg; @@ -5268,20 +5173,20 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, { int pci_dir = XFER_NONE; struct CommandList *a; /* for commands to be aborted */ + u32 tupper, tlower; c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; if (buff != NULL && size > 0) { c->Header.SGList = 1; - c->Header.SGTotal = 1; + c->Header.SGTotal = cpu_to_le16(1); } else { c->Header.SGList = 0; - c->Header.SGTotal = 0; + c->Header.SGTotal = cpu_to_le16(0); } - c->Header.Tag.lower = c->busaddr; + c->Header.tag = c->busaddr; memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); - c->Request.Type.Type = cmd_type; if (cmd_type == TYPE_CMD) { switch (cmd) { case HPSA_INQUIRY: @@ -5291,8 +5196,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, c->Request.CDB[2] = (page_code & 0xff); } c->Request.CDBLen = 6; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = HPSA_INQUIRY; c->Request.CDB[4] = size & 0xFF; @@ -5303,8 +5208,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, mode = 00 target = 0. Nothing to write. */ c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ @@ -5314,8 +5219,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, break; case HPSA_CACHE_FLUSH: c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_WRITE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, + ATTR_SIMPLE, XFER_WRITE); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_WRITE; c->Request.CDB[6] = BMIC_CACHE_FLUSH; @@ -5324,14 +5230,14 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, break; case TEST_UNIT_READY: c->Request.CDBLen = 6; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_NONE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); c->Request.Timeout = 0; break; case HPSA_GET_RAID_MAP: c->Request.CDBLen = 12; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = HPSA_CISS_READ; c->Request.CDB[1] = cmd; @@ -5342,8 +5248,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, break; case BMIC_SENSE_CONTROLLER_PARAMETERS: c->Request.CDBLen = 10; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_READ; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_READ; c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; 
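The fill_cmd() and queue-command hunks above all funnel through the new TYPE_ATTR_DIR() macro, which replaces the old three-member bitfield in struct RequestBlock with one explicitly packed byte: direction in bits 7:6, task attribute in bits 5:3, request type in bits 2:0. The macro itself is introduced in the hpsa_cmd.h hunk further down. A small standalone illustration of the packing and the matching GET_* accessors, using arbitrary numeric values instead of the real TYPE_*/ATTR_*/XFER_* constants from hpsa_cmd.h:

#include <stdio.h>

/* Same bit layout as the patch: dir in bits 7:6, attr in bits 5:3,
 * type in bits 2:0 of a single byte. */
#define TYPE_ATTR_DIR(t, a, d)	((((d) & 0x03) << 6) | (((a) & 0x07) << 3) | ((t) & 0x07))
#define GET_TYPE(tad)		((tad) & 0x07)
#define GET_ATTR(tad)		(((tad) >> 3) & 0x07)
#define GET_DIR(tad)		(((tad) >> 6) & 0x03)

int main(void)
{
	/* Illustrative values only; the driver passes TYPE_CMD/TYPE_MSG,
	 * ATTR_SIMPLE and XFER_READ/XFER_WRITE/XFER_NONE here. */
	unsigned char tad = TYPE_ATTR_DIR(0x01, 0x04, 0x02);

	printf("packed byte: 0x%02x\n", (unsigned int)tad);	/* 0xa1 */
	printf("type %d attr %d dir %d\n",
	       GET_TYPE(tad), GET_ATTR(tad), GET_DIR(tad));	/* 1 4 2 */
	return 0;
}

Packing the byte with explicit shifts keeps the layout independent of compiler bitfield ordering, which is presumably why the driver moves away from bitfields for a structure the controller firmware parses.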
@@ -5360,9 +5266,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, case HPSA_DEVICE_RESET_MSG: c->Request.CDBLen = 16; - c->Request.Type.Type = 1; /* It is a MSG not a CMD */ - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_NONE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); c->Request.Timeout = 0; /* Don't time out */ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; @@ -5376,27 +5281,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, break; case HPSA_ABORT_MSG: a = buff; /* point to command to be aborted */ - dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n", - a->Header.Tag.upper, a->Header.Tag.lower, - c->Header.Tag.upper, c->Header.Tag.lower); + dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx", + a->Header.tag, c->Header.tag); + tlower = (u32) (a->Header.tag >> 32); + tupper = (u32) (a->Header.tag & 0x0ffffffffULL); c->Request.CDBLen = 16; - c->Request.Type.Type = TYPE_MSG; - c->Request.Type.Attribute = ATTR_SIMPLE; - c->Request.Type.Direction = XFER_WRITE; + c->Request.type_attr_dir = + TYPE_ATTR_DIR(cmd_type, + ATTR_SIMPLE, XFER_WRITE); c->Request.Timeout = 0; /* Don't time out */ c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; c->Request.CDB[2] = 0x00; /* reserved */ c->Request.CDB[3] = 0x00; /* reserved */ /* Tag to abort goes in CDB[4]-CDB[11] */ - c->Request.CDB[4] = a->Header.Tag.lower & 0xFF; - c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF; - c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF; - c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF; - c->Request.CDB[8] = a->Header.Tag.upper & 0xFF; - c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF; - c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF; - c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF; + c->Request.CDB[4] = tlower & 0xFF; + c->Request.CDB[5] = (tlower >> 8) & 0xFF; + c->Request.CDB[6] = (tlower >> 16) & 0xFF; + c->Request.CDB[7] = (tlower >> 24) & 0xFF; + c->Request.CDB[8] = tupper & 0xFF; + c->Request.CDB[9] = (tupper >> 8) & 0xFF; + c->Request.CDB[10] = (tupper >> 16) & 0xFF; + c->Request.CDB[11] = (tupper >> 24) & 0xFF; c->Request.CDB[12] = 0x00; /* reserved */ c->Request.CDB[13] = 0x00; /* reserved */ c->Request.CDB[14] = 0x00; /* reserved */ @@ -5412,7 +5318,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, BUG(); } - switch (c->Request.Type.Direction) { + switch (GET_DIR(c->Request.type_attr_dir)) { case XFER_READ: pci_dir = PCI_DMA_FROMDEVICE; break; @@ -5467,15 +5373,9 @@ static void start_io(struct ctlr_info *h, unsigned long *flags) /* Put job onto the completed Q */ addQ(&h->cmpQ, c); - - /* Must increment commands_outstanding before unlocking - * and submitting to avoid race checking for fifo full - * condition. 
- */ - h->commands_outstanding++; - - /* Tell the controller execute command */ + atomic_inc(&h->commands_outstanding); spin_unlock_irqrestore(&h->lock, *flags); + /* Tell the controller execute command */ h->access.submit_command(h, c); spin_lock_irqsave(&h->lock, *flags); } @@ -5521,6 +5421,7 @@ static inline void finish_cmd(struct CommandList *c) unsigned long flags; int io_may_be_stalled = 0; struct ctlr_info *h = c->h; + int count; spin_lock_irqsave(&h->lock, flags); removeQ(c); @@ -5541,11 +5442,10 @@ static inline void finish_cmd(struct CommandList *c) * want to get in a cycle where we call start_io every time * through here. */ - if (unlikely(h->fifo_recently_full) && - h->commands_outstanding < 5) - io_may_be_stalled = 1; - + count = atomic_read(&h->commands_outstanding); spin_unlock_irqrestore(&h->lock, flags); + if (unlikely(h->fifo_recently_full) && count < 5) + io_may_be_stalled = 1; dial_up_lockup_detection_on_fw_flash_complete(c->h, c); if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI @@ -5765,22 +5665,20 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, cmd->CommandHeader.ReplyQueue = 0; cmd->CommandHeader.SGList = 0; - cmd->CommandHeader.SGTotal = 0; - cmd->CommandHeader.Tag.lower = paddr32; - cmd->CommandHeader.Tag.upper = 0; + cmd->CommandHeader.SGTotal = cpu_to_le16(0); + cmd->CommandHeader.tag = paddr32; memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); cmd->Request.CDBLen = 16; - cmd->Request.Type.Type = TYPE_MSG; - cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; - cmd->Request.Type.Direction = XFER_NONE; + cmd->Request.type_attr_dir = + TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); cmd->Request.Timeout = 0; /* Don't time out */ cmd->Request.CDB[0] = opcode; cmd->Request.CDB[1] = type; memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ - cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); - cmd->ErrorDescriptor.Addr.upper = 0; - cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); + cmd->ErrorDescriptor.Addr = + cpu_to_le64((paddr32 + sizeof(*cmd))); + cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); @@ -5818,7 +5716,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, #define hpsa_noop(p) hpsa_message(p, 3, 0) static int hpsa_controller_hard_reset(struct pci_dev *pdev, - void * __iomem vaddr, u32 use_doorbell) + void __iomem *vaddr, u32 use_doorbell) { u16 pmcsr; int pos; @@ -6056,7 +5954,7 @@ unmap_vaddr: * the io functions. * This is for debug only. 
*/ -static void print_cfg_table(struct device *dev, struct CfgTable *tb) +static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) { #ifdef HPSA_DEBUG int i; @@ -6323,11 +6221,11 @@ static void hpsa_find_board_params(struct ctlr_info *h) h->max_cmd_sg_entries = 31; if (h->maxsgentries > 512) { h->max_cmd_sg_entries = 32; - h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; + h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; h->maxsgentries--; /* save one for chain pointer */ } else { - h->maxsgentries = 31; /* default to traditional values */ h->chainsize = 0; + h->maxsgentries = 31; /* default to traditional values */ } /* Find out what task management functions are supported and cache */ @@ -6456,15 +6354,15 @@ static int hpsa_pci_init(struct ctlr_info *h) return err; } - /* Enable bus mastering (pci_disable_device may disable this) */ - pci_set_master(h->pdev); - err = pci_request_regions(h->pdev, HPSA); if (err) { dev_err(&h->pdev->dev, "cannot obtain PCI resources, aborting\n"); return err; } + + pci_set_master(h->pdev); + hpsa_interrupt_mode(h); err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) @@ -6544,7 +6442,9 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) dev_warn(&pdev->dev, "failed to enable device.\n"); return -ENODEV; } + pci_set_master(pdev); + /* Reset the controller with a PCI power-cycle or via doorbell */ rc = hpsa_kdump_hard_reset_controller(pdev); @@ -7431,13 +7331,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; cp->timeout_sec = 0; cp->ReplyQueue = 0; - cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | - DIRECT_LOOKUP_BIT; - cp->Tag.upper = 0; - cp->host_addr.lower = - (u32) (h->ioaccel_cmd_pool_dhandle + + cp->tag = + cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) | + DIRECT_LOOKUP_BIT); + cp->host_addr = + cpu_to_le64(h->ioaccel_cmd_pool_dhandle + (i * sizeof(struct io_accel1_cmd))); - cp->host_addr.upper = 0; } } else if (trans_support & CFGTBL_Trans_io_accel2) { u64 cfg_offset, cfg_base_addr_index; @@ -7711,7 +7610,7 @@ static void __attribute__((unused)) verify_offsets(void) VERIFY_OFFSET(timeout_sec, 0x62); VERIFY_OFFSET(ReplyQueue, 0x64); VERIFY_OFFSET(reserved9, 0x65); - VERIFY_OFFSET(Tag, 0x68); + VERIFY_OFFSET(tag, 0x68); VERIFY_OFFSET(host_addr, 0x70); VERIFY_OFFSET(CISS_LUN, 0x78); VERIFY_OFFSET(SG, 0x78 + 8); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 24472cec7de3..8e06d9e280ec 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -118,7 +118,7 @@ struct ctlr_info { struct CfgTable __iomem *cfgtable; int interrupts_enabled; int max_commands; - int commands_outstanding; + atomic_t commands_outstanding; # define PERF_MODE_INT 0 # define DOORBELL_INT 1 # define SIMPLE_MODE_INT 2 @@ -164,7 +164,7 @@ struct ctlr_info { */ u32 trans_support; u32 trans_offset; - struct TransTable_struct *transtable; + struct TransTable_struct __iomem *transtable; unsigned long transMethod; /* cap concurrent passthrus at some reasonable maximum */ @@ -181,7 +181,7 @@ struct ctlr_info { u32 *blockFetchTable; u32 *ioaccel1_blockFetchTable; u32 *ioaccel2_blockFetchTable; - u32 *ioaccel2_bft2_regs; + u32 __iomem *ioaccel2_bft2_regs; unsigned char *hba_inquiry_data; u32 driver_support; u32 fw_support; @@ -192,7 +192,7 @@ struct ctlr_info { u64 last_heartbeat_timestamp; u32 heartbeat_sample_interval; atomic_t firmware_flash_in_progress; - u32 *lockup_detected; + u32 __percpu *lockup_detected; struct delayed_work 
monitor_ctlr_work; int remove_in_progress; u32 fifo_recently_full; @@ -395,7 +395,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) { struct reply_queue_buffer *rq = &h->reply_queue[q]; - unsigned long flags, register_value = FIFO_EMPTY; + unsigned long register_value = FIFO_EMPTY; /* msi auto clears the interrupt pending bit. */ if (!(h->msi_vector || h->msix_vector)) { @@ -413,9 +413,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { register_value = rq->head[rq->current_entry]; rq->current_entry++; - spin_lock_irqsave(&h->lock, flags); - h->commands_outstanding--; - spin_unlock_irqrestore(&h->lock, flags); + atomic_dec(&h->commands_outstanding); } else { register_value = FIFO_EMPTY; } @@ -433,11 +431,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) */ static unsigned long SA5_fifo_full(struct ctlr_info *h) { - if (h->commands_outstanding >= h->max_commands) - return 1; - else - return 0; - + return atomic_read(&h->commands_outstanding) >= h->max_commands; } /* * returns value read from hardware. @@ -448,13 +442,9 @@ static unsigned long SA5_completed(struct ctlr_info *h, { unsigned long register_value = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); - unsigned long flags; - if (register_value != FIFO_EMPTY) { - spin_lock_irqsave(&h->lock, flags); - h->commands_outstanding--; - spin_unlock_irqrestore(&h->lock, flags); - } + if (register_value != FIFO_EMPTY) + atomic_dec(&h->commands_outstanding); #ifdef HPSA_DEBUG if (register_value != FIFO_EMPTY) @@ -510,7 +500,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) { u64 register_value; struct reply_queue_buffer *rq = &h->reply_queue[q]; - unsigned long flags; BUG_ON(q >= h->nreply_queues); @@ -528,9 +517,7 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) wmb(); writel((q << 24) | rq->current_entry, h->vaddr + IOACCEL_MODE1_CONSUMER_INDEX); - spin_lock_irqsave(&h->lock, flags); - h->commands_outstanding--; - spin_unlock_irqrestore(&h->lock, flags); + atomic_dec(&h->commands_outstanding); } return (unsigned long) register_value; } diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index b5125dc31439..cb988c41cad9 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -252,7 +252,7 @@ struct ReportExtendedLUNdata { u8 LUNListLength[4]; u8 extended_response_flag; u8 reserved[3]; - struct ext_report_lun_entry LUN[HPSA_MAX_LUN]; + struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN]; }; struct SenseSubsystem_info { @@ -314,28 +314,36 @@ struct CommandListHeader { u8 ReplyQueue; u8 SGList; u16 SGTotal; - struct vals32 Tag; + u64 tag; union LUNAddr LUN; }; struct RequestBlock { u8 CDBLen; - struct { - u8 Type:3; - u8 Attribute:3; - u8 Direction:2; - } Type; + /* + * type_attr_dir: + * type: low 3 bits + * attr: middle 3 bits + * dir: high 2 bits + */ + u8 type_attr_dir; +#define TYPE_ATTR_DIR(t, a, d) ((((d) & 0x03) << 6) |\ + (((a) & 0x07) << 3) |\ + ((t) & 0x07)) +#define GET_TYPE(tad) ((tad) & 0x07) +#define GET_ATTR(tad) (((tad) >> 3) & 0x07) +#define GET_DIR(tad) (((tad) >> 6) & 0x03) u16 Timeout; u8 CDB[16]; }; struct ErrDescriptor { - struct vals32 Addr; + u64 Addr; u32 Len; }; struct SGDescriptor { - struct vals32 Addr; + u64 Addr; u32 Len; u32 Ext; }; @@ -434,8 +442,8 @@ struct io_accel1_cmd { u16 timeout_sec; /* 0x62 - 0x63 */ u8 ReplyQueue; /* 
0x64 */ u8 reserved9[3]; /* 0x65 - 0x67 */ - struct vals32 Tag; /* 0x68 - 0x6F */ - struct vals32 host_addr; /* 0x70 - 0x77 */ + u64 tag; /* 0x68 - 0x6F */ + u64 host_addr; /* 0x70 - 0x77 */ u8 CISS_LUN[8]; /* 0x78 - 0x7F */ struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); @@ -555,8 +563,8 @@ struct hpsa_tmf_struct { u8 reserved1; /* byte 3 Reserved */ u32 it_nexus; /* SCSI I-T Nexus */ u8 lun_id[8]; /* LUN ID for TMF request */ - struct vals32 Tag; /* cciss tag associated w/ request */ - struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */ + u64 tag; /* cciss tag associated w/ request */ + u64 abort_tag; /* cciss tag of SCSI cmd or task to abort */ u64 error_ptr; /* Error Pointer */ u32 error_len; /* Error Length */ }; diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index dedb62c21b29..e995218476ed 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1118,17 +1118,13 @@ static int hptiop_reset(struct scsi_cmnd *scp) } static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev, - int queue_depth, int reason) + int queue_depth) { struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata; - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (queue_depth > hba->max_requests) queue_depth = hba->max_requests; - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); - return queue_depth; + return scsi_change_queue_depth(sdev, queue_depth); } static ssize_t hptiop_show_version(struct device *dev, diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 598c42cba5a8..f58c6d8e0264 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1643,19 +1643,9 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun); memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len); - if (scsi_populate_tag_msg(cmnd, tag)) { - vfc_cmd->task_tag = cpu_to_be64(tag[1]); - switch (tag[0]) { - case MSG_SIMPLE_TAG: - vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK; - break; - case MSG_HEAD_TAG: - vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE; - break; - case MSG_ORDERED_TAG: - vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK; - break; - }; + if (cmnd->flags & SCMD_TAGGED) { + vfc_cmd->task_tag = cpu_to_be64(cmnd->tag); + vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK; } if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev)))) @@ -2897,12 +2887,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) spin_lock_irqsave(shost->host_lock, flags); if (sdev->type == TYPE_DISK) sdev->allow_restart = 1; - - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); - scsi_activate_tcq(sdev, sdev->queue_depth); - } else - scsi_deactivate_tcq(sdev, sdev->queue_depth); spin_unlock_irqrestore(shost->host_lock, flags); return 0; } @@ -2916,40 +2900,12 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) * Return value: * actual depth set **/ -static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason) +static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth) { - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (qdepth > IBMVFC_MAX_CMDS_PER_LUN) qdepth = IBMVFC_MAX_CMDS_PER_LUN; - scsi_adjust_queue_depth(sdev, 0, qdepth); - return sdev->queue_depth; -} - -/** - * ibmvfc_change_queue_type - Change the device's queue type - * @sdev: scsi device struct - * @tag_type: type of tags to use - * - * Return value: - * 
actual queue type set - **/ -static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; + return scsi_change_queue_depth(sdev, qdepth); } static ssize_t ibmvfc_show_host_partition_name(struct device *dev, @@ -3133,7 +3089,7 @@ static struct scsi_host_template driver_template = { .target_alloc = ibmvfc_target_alloc, .scan_finished = ibmvfc_scan_finished, .change_queue_depth = ibmvfc_change_queue_depth, - .change_queue_type = ibmvfc_change_queue_type, + .change_queue_type = scsi_change_queue_type, .cmd_per_lun = 16, .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, .this_id = -1, @@ -3141,6 +3097,8 @@ static struct scsi_host_template driver_template = { .max_sectors = IBMVFC_MAX_SECTORS, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = ibmvfc_attrs, + .use_blk_tags = 1, + .track_queue_depth = 1, }; /** diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 7b23f21f22f1..acea5d6eebd0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1929,7 +1929,6 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } spin_unlock_irqrestore(shost->host_lock, lock_flags); - scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); return 0; } @@ -1942,17 +1941,11 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) * Return value: * actual depth set **/ -static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason) +static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) { - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN) qdepth = IBMVSCSI_MAX_CMDS_PER_LUN; - - scsi_adjust_queue_depth(sdev, 0, qdepth); - return sdev->queue_depth; + return scsi_change_queue_depth(sdev, qdepth); } /* ------------------------------------------------------------ diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 2a9578c116b7..540294389355 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -3942,8 +3942,9 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, return -EIO; } - sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist, - sglist->num_sg, DMA_TO_DEVICE); + sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, + sglist->scatterlist, sglist->num_sg, + DMA_TO_DEVICE); if (!sglist->num_dma_sg) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -4327,16 +4328,12 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; * Return value: * actual depth set **/ -static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason) +static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; struct ipr_resource_entry *res; unsigned long lock_flags = 0; - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; @@ -4344,7 +4341,7 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth, qdepth = IPR_MAX_CMD_PER_ATA_LUN; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + 
scsi_change_queue_depth(sdev, qdepth); return sdev->queue_depth; } @@ -4364,24 +4361,10 @@ static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type) spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; - - if (res) { - if (ipr_is_gscsi(res) && sdev->tagged_supported) { - /* - * We don't bother quiescing the device here since the - * adapter firmware does it for us. - */ - scsi_set_tag_type(sdev, tag_type); - - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - } else + if (res && ipr_is_gscsi(res)) + tag_type = scsi_change_queue_type(sdev, tag_type); + else tag_type = 0; - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return tag_type; } @@ -4765,10 +4748,10 @@ static int ipr_slave_configure(struct scsi_device *sdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); if (ap) { - scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN); + scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN); ata_sas_slave_configure(sdev, ap); - } else - scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); + } + if (ioa_cfg->sis64) sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", ipr_format_res_path(ioa_cfg, @@ -5585,7 +5568,7 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, nseg = scsi_dma_map(scsi_cmd); if (nseg < 0) { if (printk_ratelimit()) - dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); return -1; } @@ -5636,7 +5619,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, nseg = scsi_dma_map(scsi_cmd); if (nseg < 0) { - dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); return -1; } @@ -5673,35 +5656,6 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, } /** - * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes - * @scsi_cmd: scsi command struct - * - * Return value: - * task attributes - **/ -static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd) -{ - u8 tag[2]; - u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK; - - if (scsi_populate_tag_msg(scsi_cmd, tag)) { - switch (tag[0]) { - case MSG_SIMPLE_TAG: - rc = IPR_FLAGS_LO_SIMPLE_TASK; - break; - case MSG_HEAD_TAG: - rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK; - break; - case MSG_ORDERED_TAG: - rc = IPR_FLAGS_LO_ORDERED_TASK; - break; - }; - } - - return rc; -} - -/** * ipr_erp_done - Process completion of ERP for a device * @ipr_cmd: ipr command struct * @@ -6236,7 +6190,10 @@ static int ipr_queuecommand(struct Scsi_Host *shost, ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; } ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; - ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); + if (scsi_cmd->flags & SCMD_TAGGED) + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; + else + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; } if (scsi_cmd->cmnd[0] >= 0xC0 && @@ -6357,6 +6314,7 @@ static struct scsi_host_template driver_template = { .sdev_attrs = ipr_dev_attrs, .proc_name = IPR_NAME, .no_write_same = 1, + .use_blk_tags = 1, }; /** @@ -8431,7 +8389,7 @@ static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; - pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist, + dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, sglist->num_sg, DMA_TO_DEVICE); ipr_cmd->job_step = 
ipr_reset_alert; @@ -8871,7 +8829,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { if (ioa_cfg->ipr_cmnd_list[i]) - pci_pool_free(ioa_cfg->ipr_cmd_pool, + dma_pool_free(ioa_cfg->ipr_cmd_pool, ioa_cfg->ipr_cmnd_list[i], ioa_cfg->ipr_cmnd_list_dma[i]); @@ -8879,7 +8837,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) } if (ioa_cfg->ipr_cmd_pool) - pci_pool_destroy(ioa_cfg->ipr_cmd_pool); + dma_pool_destroy(ioa_cfg->ipr_cmd_pool); kfree(ioa_cfg->ipr_cmnd_list); kfree(ioa_cfg->ipr_cmnd_list_dma); @@ -8900,25 +8858,24 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) int i; kfree(ioa_cfg->res_entries); - pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs), - ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); + dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), + ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); ipr_free_cmd_blks(ioa_cfg); for (i = 0; i < ioa_cfg->hrrq_num; i++) - pci_free_consistent(ioa_cfg->pdev, - sizeof(u32) * ioa_cfg->hrrq[i].size, - ioa_cfg->hrrq[i].host_rrq, - ioa_cfg->hrrq[i].host_rrq_dma); + dma_free_coherent(&ioa_cfg->pdev->dev, + sizeof(u32) * ioa_cfg->hrrq[i].size, + ioa_cfg->hrrq[i].host_rrq, + ioa_cfg->hrrq[i].host_rrq_dma); - pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size, - ioa_cfg->u.cfg_table, - ioa_cfg->cfg_table_dma); + dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, + ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); for (i = 0; i < IPR_NUM_HCAMS; i++) { - pci_free_consistent(ioa_cfg->pdev, - sizeof(struct ipr_hostrcb), - ioa_cfg->hostrcb[i], - ioa_cfg->hostrcb_dma[i]); + dma_free_coherent(&ioa_cfg->pdev->dev, + sizeof(struct ipr_hostrcb), + ioa_cfg->hostrcb[i], + ioa_cfg->hostrcb_dma[i]); } ipr_free_dump(ioa_cfg); @@ -8979,7 +8936,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) dma_addr_t dma_addr; int i, entries_each_hrrq, hrrq_id = 0; - ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev, + ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, sizeof(struct ipr_cmnd), 512, 0); if (!ioa_cfg->ipr_cmd_pool) @@ -9029,7 +8986,7 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) } for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { - ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); + ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); if (!ipr_cmd) { ipr_free_cmd_blks(ioa_cfg); @@ -9100,9 +9057,10 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; } - ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, - sizeof(struct ipr_misc_cbs), - &ioa_cfg->vpd_cbs_dma); + ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, + sizeof(struct ipr_misc_cbs), + &ioa_cfg->vpd_cbs_dma, + GFP_KERNEL); if (!ioa_cfg->vpd_cbs) goto out_free_res_entries; @@ -9111,13 +9069,14 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) goto out_free_vpd_cbs; for (i = 0; i < ioa_cfg->hrrq_num; i++) { - ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev, + ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, - &ioa_cfg->hrrq[i].host_rrq_dma); + &ioa_cfg->hrrq[i].host_rrq_dma, + GFP_KERNEL); if (!ioa_cfg->hrrq[i].host_rrq) { while (--i > 0) - pci_free_consistent(pdev, + dma_free_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, ioa_cfg->hrrq[i].host_rrq_dma); @@ -9126,17 +9085,19 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) ioa_cfg->hrrq[i].ioa_cfg 
= ioa_cfg; } - ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev, - ioa_cfg->cfg_table_size, - &ioa_cfg->cfg_table_dma); + ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, + ioa_cfg->cfg_table_size, + &ioa_cfg->cfg_table_dma, + GFP_KERNEL); if (!ioa_cfg->u.cfg_table) goto out_free_host_rrq; for (i = 0; i < IPR_NUM_HCAMS; i++) { - ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev, - sizeof(struct ipr_hostrcb), - &ioa_cfg->hostrcb_dma[i]); + ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, + sizeof(struct ipr_hostrcb), + &ioa_cfg->hostrcb_dma[i], + GFP_KERNEL); if (!ioa_cfg->hostrcb[i]) goto out_free_hostrcb_dma; @@ -9160,25 +9121,24 @@ out: out_free_hostrcb_dma: while (i-- > 0) { - pci_free_consistent(pdev, sizeof(struct ipr_hostrcb), - ioa_cfg->hostrcb[i], - ioa_cfg->hostrcb_dma[i]); + dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), + ioa_cfg->hostrcb[i], + ioa_cfg->hostrcb_dma[i]); } - pci_free_consistent(pdev, ioa_cfg->cfg_table_size, - ioa_cfg->u.cfg_table, - ioa_cfg->cfg_table_dma); + dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, + ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); out_free_host_rrq: for (i = 0; i < ioa_cfg->hrrq_num; i++) { - pci_free_consistent(pdev, - sizeof(u32) * ioa_cfg->hrrq[i].size, - ioa_cfg->hrrq[i].host_rrq, - ioa_cfg->hrrq[i].host_rrq_dma); + dma_free_coherent(&pdev->dev, + sizeof(u32) * ioa_cfg->hrrq[i].size, + ioa_cfg->hrrq[i].host_rrq, + ioa_cfg->hrrq[i].host_rrq_dma); } out_ipr_free_cmd_blocks: ipr_free_cmd_blks(ioa_cfg); out_free_vpd_cbs: - pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs), - ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); + dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), + ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); out_free_res_entries: kfree(ioa_cfg->res_entries); goto out; @@ -9618,16 +9578,17 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ipr_init_regs(ioa_cfg); if (ioa_cfg->sis64) { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc < 0) { - dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); + rc = dma_set_mask_and_coherent(&pdev->dev, + DMA_BIT_MASK(32)); } } else - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc < 0) { - dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); + dev_err(&pdev->dev, "Failed to set DMA mask\n"); goto cleanup_nomem; } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index d0201ceb4aac..9ebdebd944e7 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -1549,7 +1549,7 @@ struct ipr_ioa_cfg { struct ipr_misc_cbs *vpd_cbs; dma_addr_t vpd_cbs_dma; - struct pci_pool *ipr_cmd_pool; + struct dma_pool *ipr_cmd_pool; struct ipr_cmnd *reset_cmd; int (*reset) (struct ipr_cmnd *); diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index e5afc3884d74..e5c28435d768 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -1210,7 +1210,7 @@ ips_slave_configure(struct scsi_device * SDptr) min = ha->max_cmds / 2; if (ha->enq->ucLogDriveCount <= 2) min = ha->max_cmds - 1; - scsi_adjust_queue_depth(SDptr, MSG_ORDERED_TAG, min); + scsi_change_queue_depth(SDptr, min); } SDptr->skip_ms_page_8 = 1; diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 2e890b1e2526..724c6265b667 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -172,6 +172,8 @@ static struct 
scsi_host_template isci_sht = { .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = isci_host_attrs, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static struct sas_domain_function_template isci_transport_ops = { @@ -258,8 +260,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host) sas_ha->sas_port = sas_ports; sas_ha->num_phys = SCI_MAX_PHYS; - sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL; - sas_ha->lldd_max_execute_num = 1; sas_ha->strict_wide_ports = 1; sas_register_ha(sas_ha); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 5d6fda72d659..3f63c6318b0d 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -117,104 +117,97 @@ static inline int isci_device_io_ready(struct isci_remote_device *idev, * functions. This function is called by libsas to send a task down to * hardware. * @task: This parameter specifies the SAS task to send. - * @num: This parameter specifies the number of tasks to queue. * @gfp_flags: This parameter specifies the context of this call. * * status, zero indicates success. */ -int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) +int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags) { struct isci_host *ihost = dev_to_ihost(task->dev); struct isci_remote_device *idev; unsigned long flags; + enum sci_status status = SCI_FAILURE; bool io_ready; u16 tag; - dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_lookup_device(task->dev); + io_ready = isci_device_io_ready(idev, task); + tag = isci_alloc_tag(ihost); + spin_unlock_irqrestore(&ihost->scic_lock, flags); - for_each_sas_task(num, task) { - enum sci_status status = SCI_FAILURE; + dev_dbg(&ihost->pdev->dev, + "task: %p, dev: %p idev: %p:%#lx cmd = %p\n", + task, task->dev, idev, idev ? idev->flags : 0, + task->uldd_task); - spin_lock_irqsave(&ihost->scic_lock, flags); - idev = isci_lookup_device(task->dev); - io_ready = isci_device_io_ready(idev, task); - tag = isci_alloc_tag(ihost); - spin_unlock_irqrestore(&ihost->scic_lock, flags); + if (!idev) { + isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, + SAS_DEVICE_UNKNOWN); + } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { + /* Indicate QUEUE_FULL so that the scsi midlayer + * retries. + */ + isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, + SAS_QUEUE_FULL); + } else { + /* There is a device and it's ready for I/O. */ + spin_lock_irqsave(&task->task_state_lock, flags); - dev_dbg(&ihost->pdev->dev, - "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n", - task, num, task->dev, idev, idev ? idev->flags : 0, - task->uldd_task); - - if (!idev) { - isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, - SAS_DEVICE_UNKNOWN); - } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { - /* Indicate QUEUE_FULL so that the scsi midlayer - * retries. - */ - isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, - SAS_QUEUE_FULL); + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + /* The I/O was aborted. */ + spin_unlock_irqrestore(&task->task_state_lock, flags); + + isci_task_refuse(ihost, task, + SAS_TASK_UNDELIVERED, + SAM_STAT_TASK_ABORTED); } else { - /* There is a device and it's ready for I/O. */ - spin_lock_irqsave(&task->task_state_lock, flags); - - if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { - /* The I/O was aborted. 
*/ - spin_unlock_irqrestore(&task->task_state_lock, - flags); - - isci_task_refuse(ihost, task, - SAS_TASK_UNDELIVERED, - SAM_STAT_TASK_ABORTED); - } else { - task->task_state_flags |= SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + /* build and send the request. */ + status = isci_request_execute(ihost, idev, task, tag); + + if (status != SCI_SUCCESS) { + spin_lock_irqsave(&task->task_state_lock, flags); + /* Did not really start this command. */ + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&task->task_state_lock, flags); - /* build and send the request. */ - status = isci_request_execute(ihost, idev, task, tag); - - if (status != SCI_SUCCESS) { - - spin_lock_irqsave(&task->task_state_lock, flags); - /* Did not really start this command. */ - task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; - spin_unlock_irqrestore(&task->task_state_lock, flags); - - if (test_bit(IDEV_GONE, &idev->flags)) { - - /* Indicate that the device - * is gone. - */ - isci_task_refuse(ihost, task, - SAS_TASK_UNDELIVERED, - SAS_DEVICE_UNKNOWN); - } else { - /* Indicate QUEUE_FULL so that - * the scsi midlayer retries. - * If the request failed for - * remote device reasons, it - * gets returned as - * SAS_TASK_UNDELIVERED next - * time through. - */ - isci_task_refuse(ihost, task, - SAS_TASK_COMPLETE, - SAS_QUEUE_FULL); - } + if (test_bit(IDEV_GONE, &idev->flags)) { + /* Indicate that the device + * is gone. + */ + isci_task_refuse(ihost, task, + SAS_TASK_UNDELIVERED, + SAS_DEVICE_UNKNOWN); + } else { + /* Indicate QUEUE_FULL so that + * the scsi midlayer retries. + * If the request failed for + * remote device reasons, it + * gets returned as + * SAS_TASK_UNDELIVERED next + * time through. 
+ */ + isci_task_refuse(ihost, task, + SAS_TASK_COMPLETE, + SAS_QUEUE_FULL); } } } - if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { - spin_lock_irqsave(&ihost->scic_lock, flags); - /* command never hit the device, so just free - * the tci and skip the sequence increment - */ - isci_tci_free(ihost, ISCI_TAG_TCI(tag)); - spin_unlock_irqrestore(&ihost->scic_lock, flags); - } - isci_put_device(idev); } + + if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { + spin_lock_irqsave(&ihost->scic_lock, flags); + /* command never hit the device, so just free + * the tci and skip the sequence increment + */ + isci_tci_free(ihost, ISCI_TAG_TCI(tag)); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } + + isci_put_device(idev); return 0; } diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 9c06cbad1d26..8f4531f22ac2 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h @@ -131,7 +131,6 @@ static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) int isci_task_execute_task( struct sas_task *task, - int num, gfp_t gfp_flags); int isci_task_abort_task( diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 427af0f24b0f..0b8af186e707 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -952,7 +952,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = { .module = THIS_MODULE, .name = "iSCSI Initiator over TCP/IP", .queuecommand = iscsi_queuecommand, - .change_queue_depth = iscsi_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1, .sg_tablesize = 4096, .max_sectors = 0xFFFF, @@ -966,6 +966,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = { .target_alloc = iscsi_target_alloc, .proc_name = "iscsi_tcp", .this_id = -1, + .track_queue_depth = 1, }; static struct iscsi_transport iscsi_sw_tcp_transport = { diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 1d7e76e8b447..c6795941b45d 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -2160,62 +2160,12 @@ int fc_slave_alloc(struct scsi_device *sdev) if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - if (sdev->tagged_supported) - scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH); - else - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), - FC_FCP_DFLT_QUEUE_DEPTH); - + scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH); return 0; } EXPORT_SYMBOL(fc_slave_alloc); /** - * fc_change_queue_depth() - Change a device's queue depth - * @sdev: The SCSI device whose queue depth is to change - * @qdepth: The new queue depth - * @reason: The resason for the change - */ -int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) -{ - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - break; - case SCSI_QDEPTH_QFULL: - scsi_track_queue_full(sdev, qdepth); - break; - case SCSI_QDEPTH_RAMP_UP: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - break; - default: - return -EOPNOTSUPP; - } - return sdev->queue_depth; -} -EXPORT_SYMBOL(fc_change_queue_depth); - -/** - * fc_change_queue_type() - Change a device's queue type - * @sdev: The SCSI device whose queue depth is to change - * @tag_type: Identifier for queue type - */ -int fc_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - if (tag_type) - scsi_activate_tcq(sdev, 
sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; -} -EXPORT_SYMBOL(fc_change_queue_type); - -/** * fc_fcp_destory() - Tear down the FCP layer for a given local port * @lport: The local port that no longer needs the FCP layer */ diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 0d8bc6c66650..8053f24f0349 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1771,25 +1771,6 @@ fault: } EXPORT_SYMBOL_GPL(iscsi_queuecommand); -int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) -{ - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - case SCSI_QDEPTH_QFULL: - scsi_track_queue_full(sdev, depth); - break; - case SCSI_QDEPTH_RAMP_UP: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - default: - return -EOPNOTSUPP; - } - return sdev->queue_depth; -} -EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); - int iscsi_target_alloc(struct scsi_target *starget) { struct iscsi_cls_session *cls_session = starget_to_session(starget); diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 766098af4eb7..577770fdee86 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -171,7 +171,6 @@ static void sas_ata_task_done(struct sas_task *task) spin_unlock_irqrestore(ap->lock, flags); qc_already_gone: - list_del_init(&task->list); sas_free_task(task); } @@ -244,12 +243,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) if (qc->scsicmd) ASSIGN_SAS_TASK(qc->scsicmd, task); - if (sas_ha->lldd_max_execute_num < 2) - ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); - else - ret = sas_queue_up(task); - - /* Examine */ + ret = i->dft->lldd_execute_task(task, GFP_ATOMIC); if (ret) { SAS_DPRINTK("lldd_execute_task returned: %d\n", ret); @@ -485,7 +479,6 @@ static void sas_ata_internal_abort(struct sas_task *task) return; out: - list_del_init(&task->list); sas_free_task(task); } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 0cac7d8fd0f7..022bb6e10d98 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -96,7 +96,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size, task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->slow_task->timer); - res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); + res = i->dft->lldd_execute_task(task, GFP_KERNEL); if (res) { del_timer(&task->slow_task->timer); diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index dbc8a793fd86..362da44f2948 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -45,7 +45,6 @@ struct sas_task *sas_alloc_task(gfp_t flags) struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); if (task) { - INIT_LIST_HEAD(&task->list); spin_lock_init(&task->task_state_lock); task->task_state_flags = SAS_TASK_STATE_PENDING; } @@ -77,7 +76,6 @@ EXPORT_SYMBOL_GPL(sas_alloc_slow_task); void sas_free_task(struct sas_task *task) { if (task) { - BUG_ON(!list_empty(&task->list)); kfree(task->slow_task); kmem_cache_free(sas_task_cache, task); } @@ -127,11 +125,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha) spin_lock_init(&sas_ha->phy_port_lock); sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); - if (sas_ha->lldd_queue_size == 0) - sas_ha->lldd_queue_size = 1; - else if 
(sas_ha->lldd_queue_size == -1) - sas_ha->lldd_queue_size = 128; /* Sanity */ - set_bit(SAS_HA_REGISTERED, &sas_ha->state); spin_lock_init(&sas_ha->lock); mutex_init(&sas_ha->drain_mutex); @@ -157,15 +150,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha) goto Undo_ports; } - if (sas_ha->lldd_max_execute_num > 1) { - error = sas_init_queue(sas_ha); - if (error) { - printk(KERN_NOTICE "couldn't start queue thread:%d, " - "running in direct mode\n", error); - sas_ha->lldd_max_execute_num = 1; - } - } - INIT_LIST_HEAD(&sas_ha->eh_done_q); INIT_LIST_HEAD(&sas_ha->eh_ata_q); @@ -201,11 +185,6 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha) __sas_drain_work(sas_ha); mutex_unlock(&sas_ha->drain_mutex); - if (sas_ha->lldd_max_execute_num > 1) { - sas_shutdown_queue(sas_ha); - sas_ha->lldd_max_execute_num = 1; - } - return 0; } diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 7e7ba83f0a21..9cf0bc260b0e 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -66,9 +66,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha); enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); -int sas_init_queue(struct sas_ha_struct *sas_ha); int sas_init_events(struct sas_ha_struct *sas_ha); -void sas_shutdown_queue(struct sas_ha_struct *sas_ha); void sas_disable_revalidation(struct sas_ha_struct *ha); void sas_enable_revalidation(struct sas_ha_struct *ha); void __sas_drain_work(struct sas_ha_struct *ha); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 24e477d2ea70..72918d227ead 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -112,7 +112,6 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task) sc->result = (hs << 16) | stat; ASSIGN_SAS_TASK(sc, NULL); - list_del_init(&task->list); sas_free_task(task); } @@ -138,7 +137,6 @@ static void sas_scsi_task_done(struct sas_task *task) if (unlikely(!sc)) { SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); - list_del_init(&task->list); sas_free_task(task); return; } @@ -179,31 +177,10 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, return task; } -int sas_queue_up(struct sas_task *task) -{ - struct sas_ha_struct *sas_ha = task->dev->port->ha; - struct scsi_core *core = &sas_ha->core; - unsigned long flags; - LIST_HEAD(list); - - spin_lock_irqsave(&core->task_queue_lock, flags); - if (sas_ha->lldd_queue_size < core->task_queue_size + 1) { - spin_unlock_irqrestore(&core->task_queue_lock, flags); - return -SAS_QUEUE_FULL; - } - list_add_tail(&task->list, &core->task_queue); - core->task_queue_size += 1; - spin_unlock_irqrestore(&core->task_queue_lock, flags); - wake_up_process(core->queue_thread); - - return 0; -} - int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { struct sas_internal *i = to_sas_internal(host->transportt); struct domain_device *dev = cmd_to_domain_dev(cmd); - struct sas_ha_struct *sas_ha = dev->port->ha; struct sas_task *task; int res = 0; @@ -224,12 +201,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (!task) return SCSI_MLQUEUE_HOST_BUSY; - /* Queue up, Direct Mode or Task Collector Mode. 
*/ - if (sas_ha->lldd_max_execute_num < 2) - res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); - else - res = sas_queue_up(task); - + res = i->dft->lldd_execute_task(task, GFP_ATOMIC); if (res) goto out_free_task; return 0; @@ -323,37 +295,17 @@ enum task_disposition { TASK_IS_DONE, TASK_IS_ABORTED, TASK_IS_AT_LU, - TASK_IS_NOT_AT_HA, TASK_IS_NOT_AT_LU, TASK_ABORT_FAILED, }; static enum task_disposition sas_scsi_find_task(struct sas_task *task) { - struct sas_ha_struct *ha = task->dev->port->ha; unsigned long flags; int i, res; struct sas_internal *si = to_sas_internal(task->dev->port->ha->core.shost->transportt); - if (ha->lldd_max_execute_num > 1) { - struct scsi_core *core = &ha->core; - struct sas_task *t, *n; - - mutex_lock(&core->task_queue_flush); - spin_lock_irqsave(&core->task_queue_lock, flags); - list_for_each_entry_safe(t, n, &core->task_queue, list) - if (task == t) { - list_del_init(&t->list); - break; - } - spin_unlock_irqrestore(&core->task_queue_lock, flags); - mutex_unlock(&core->task_queue_flush); - - if (task == t) - return TASK_IS_NOT_AT_HA; - } - for (i = 0; i < 5; i++) { SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); res = si->dft->lldd_abort_task(task); @@ -667,14 +619,6 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * cmd->eh_eflags = 0; switch (res) { - case TASK_IS_NOT_AT_HA: - SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n", - __func__, task, - cmd->retries ? "retry" : "aborted"); - if (cmd->retries) - cmd->retries--; - sas_eh_finish_cmd(cmd); - continue; case TASK_IS_DONE: SAS_DPRINTK("%s: task 0x%p is done\n", __func__, task); @@ -836,9 +780,6 @@ retry: scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); out: - if (ha->lldd_max_execute_num > 1) - wake_up_process(ha->core.queue_thread); - sas_eh_handle_resets(shost); /* now link into libata eh --- if we have any ata devices */ @@ -940,15 +881,12 @@ int sas_slave_configure(struct scsi_device *scsi_dev) sas_read_port_mode_page(scsi_dev); if (scsi_dev->tagged_supported) { - scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); - scsi_activate_tcq(scsi_dev, SAS_DEF_QD); + scsi_change_queue_depth(scsi_dev, SAS_DEF_QD); } else { SAS_DPRINTK("device %llx, LUN %llx doesn't support " "TCQ\n", SAS_ADDR(dev->sas_addr), scsi_dev->lun); - scsi_dev->tagged_supported = 0; - scsi_set_tag_type(scsi_dev, 0); - scsi_deactivate_tcq(scsi_dev, 1); + scsi_change_queue_depth(scsi_dev, 1); } scsi_dev->allow_restart = 1; @@ -956,47 +894,23 @@ int sas_slave_configure(struct scsi_device *scsi_dev) return 0; } -int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason) +int sas_change_queue_depth(struct scsi_device *sdev, int depth) { struct domain_device *dev = sdev_to_domain_dev(sdev); if (dev_is_sata(dev)) - return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth, - reason); - - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - case SCSI_QDEPTH_RAMP_UP: - if (!sdev->tagged_supported) - depth = 1; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - case SCSI_QDEPTH_QFULL: - scsi_track_queue_full(sdev, depth); - break; - default: - return -EOPNOTSUPP; - } + return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth); - return depth; + if (!sdev->tagged_supported) + depth = 1; + return scsi_change_queue_depth(sdev, depth); } -int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) +int sas_change_queue_type(struct scsi_device *scsi_dev, int type) { - struct domain_device *dev = sdev_to_domain_dev(scsi_dev); - - if (dev_is_sata(dev)) + if 
(dev_is_sata(sdev_to_domain_dev(scsi_dev))) return -EINVAL; - - if (!scsi_dev->tagged_supported) - return 0; - - scsi_deactivate_tcq(scsi_dev, 1); - - scsi_set_tag_type(scsi_dev, qt); - scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); - - return qt; + return scsi_change_queue_type(scsi_dev, type); } int sas_bios_param(struct scsi_device *scsi_dev, @@ -1011,121 +925,6 @@ int sas_bios_param(struct scsi_device *scsi_dev, return 0; } -/* ---------- Task Collector Thread implementation ---------- */ - -static void sas_queue(struct sas_ha_struct *sas_ha) -{ - struct scsi_core *core = &sas_ha->core; - unsigned long flags; - LIST_HEAD(q); - int can_queue; - int res; - struct sas_internal *i = to_sas_internal(core->shost->transportt); - - mutex_lock(&core->task_queue_flush); - spin_lock_irqsave(&core->task_queue_lock, flags); - while (!kthread_should_stop() && - !list_empty(&core->task_queue) && - !test_bit(SAS_HA_FROZEN, &sas_ha->state)) { - - can_queue = sas_ha->lldd_queue_size - core->task_queue_size; - if (can_queue >= 0) { - can_queue = core->task_queue_size; - list_splice_init(&core->task_queue, &q); - } else { - struct list_head *a, *n; - - can_queue = sas_ha->lldd_queue_size; - list_for_each_safe(a, n, &core->task_queue) { - list_move_tail(a, &q); - if (--can_queue == 0) - break; - } - can_queue = sas_ha->lldd_queue_size; - } - core->task_queue_size -= can_queue; - spin_unlock_irqrestore(&core->task_queue_lock, flags); - { - struct sas_task *task = list_entry(q.next, - struct sas_task, - list); - list_del_init(&q); - res = i->dft->lldd_execute_task(task, can_queue, - GFP_KERNEL); - if (unlikely(res)) - __list_add(&q, task->list.prev, &task->list); - } - spin_lock_irqsave(&core->task_queue_lock, flags); - if (res) { - list_splice_init(&q, &core->task_queue); /*at head*/ - core->task_queue_size += can_queue; - } - } - spin_unlock_irqrestore(&core->task_queue_lock, flags); - mutex_unlock(&core->task_queue_flush); -} - -/** - * sas_queue_thread -- The Task Collector thread - * @_sas_ha: pointer to struct sas_ha - */ -static int sas_queue_thread(void *_sas_ha) -{ - struct sas_ha_struct *sas_ha = _sas_ha; - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - sas_queue(sas_ha); - if (kthread_should_stop()) - break; - } - - return 0; -} - -int sas_init_queue(struct sas_ha_struct *sas_ha) -{ - struct scsi_core *core = &sas_ha->core; - - spin_lock_init(&core->task_queue_lock); - mutex_init(&core->task_queue_flush); - core->task_queue_size = 0; - INIT_LIST_HEAD(&core->task_queue); - - core->queue_thread = kthread_run(sas_queue_thread, sas_ha, - "sas_queue_%d", core->shost->host_no); - if (IS_ERR(core->queue_thread)) - return PTR_ERR(core->queue_thread); - return 0; -} - -void sas_shutdown_queue(struct sas_ha_struct *sas_ha) -{ - unsigned long flags; - struct scsi_core *core = &sas_ha->core; - struct sas_task *task, *n; - - kthread_stop(core->queue_thread); - - if (!list_empty(&core->task_queue)) - SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n", - SAS_ADDR(sas_ha->sas_addr)); - - spin_lock_irqsave(&core->task_queue_lock, flags); - list_for_each_entry_safe(task, n, &core->task_queue, list) { - struct scsi_cmnd *cmd = task->uldd_task; - - list_del_init(&task->list); - - ASSIGN_SAS_TASK(cmd, NULL); - sas_free_task(task); - cmd->result = DID_ABORT << 16; - cmd->scsi_done(cmd); - } - spin_unlock_irqrestore(&core->task_queue_lock, flags); -} - /* * Tell an upper layer that it needs to initiate an abort for a given task. * This should only ever be called by an LLDD. 
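The hptiop, ibmvfc, ibmvscsi, ipr, libfc, libiscsi and libsas hunks above all make the same conversion: driver-private change_queue_depth implementations that took a reason argument and called scsi_adjust_queue_depth()/scsi_activate_tcq() are replaced by the midlayer's scsi_change_queue_depth(), and per-command tag messages obtained with scsi_populate_tag_msg() give way to testing SCMD_TAGGED on the command. The following is a minimal sketch of what a converted low-level driver looks like; it is not taken from any driver in this series, and the foo_* names, FOO_MAX_QD and the task-attribute constants are invented.

/*
 * Minimal sketch of a hypothetical "foo" LLDD after the conversion; only the
 * queue-depth and tagging hooks are shown.  All FOO_* values are invented.
 */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#define FOO_MAX_QD		64	/* invented per-LUN limit */
#define FOO_SIMPLE_TASK		0x20	/* invented HW task attributes */
#define FOO_UNTAGGED_TASK	0x00

static int foo_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	/*
	 * No "reason" argument any more: QUEUE FULL tracking and ramp-up are
	 * handled by the midlayer when .track_queue_depth is set.
	 */
	if (qdepth > FOO_MAX_QD)
		qdepth = FOO_MAX_QD;
	return scsi_change_queue_depth(sdev, qdepth);
}

static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	u8 task_attr;

	/*
	 * Instead of scsi_populate_tag_msg(), test SCMD_TAGGED and use the
	 * block-layer tag in cmd->tag directly.
	 */
	if (cmd->flags & SCMD_TAGGED)
		task_attr = FOO_SIMPLE_TASK;
	else
		task_attr = FOO_UNTAGGED_TASK;

	dev_dbg(&shost->shost_gendev, "attr %#x tag %d\n", task_attr, cmd->tag);
	/* ... build and issue the hardware command here ... */
	return 0;
}

static struct scsi_host_template foo_template = {
	.name			= "foo",
	.queuecommand		= foo_queuecommand,
	.change_queue_depth	= foo_change_queue_depth,
	.change_queue_type	= scsi_change_queue_type,
	.use_blk_tags		= 1,	/* block-layer tags back cmd->tag */
	.track_queue_depth	= 1,	/* midlayer handles QUEUE FULL ramping */
};

Setting use_blk_tags and track_queue_depth in the template is what lets the midlayer hand out block-layer tags and track QUEUE FULL conditions itself, which is why the per-driver reason handling and TCQ activation code above could simply be deleted.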
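The ipr hunks also drop the PCI-specific DMA wrappers in favour of the generic DMA API: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() with an explicit GFP flag, pci_pool_* becomes dma_pool_*, pci_map_sg() becomes dma_map_sg(), and pci_set_dma_mask() becomes dma_set_mask_and_coherent(). Below is a generic sketch of that pattern against an arbitrary PCI device, not ipr code; struct bar_ctx and its fields are invented.

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/pci.h>

/* Invented example context; only the DMA-API calls mirror the conversion. */
struct bar_ctx {
	struct pci_dev	*pdev;
	struct dma_pool	*cmd_pool;
	void		*cfg;
	dma_addr_t	cfg_dma;
};

static int bar_setup_dma(struct bar_ctx *ctx, size_t cfg_size)
{
	struct device *dev = &ctx->pdev->dev;

	/* One call now sets both the streaming and the coherent mask. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/*
	 * dma_alloc_coherent() takes an explicit gfp_t, unlike
	 * pci_alloc_consistent() which hard-coded GFP_ATOMIC.
	 */
	ctx->cfg = dma_alloc_coherent(dev, cfg_size, &ctx->cfg_dma, GFP_KERNEL);
	if (!ctx->cfg)
		return -ENOMEM;

	/* dma_pool_create(name, dev, size, align, boundary) */
	ctx->cmd_pool = dma_pool_create("bar_cmds", dev, 512, 64, 0);
	if (!ctx->cmd_pool) {
		dma_free_coherent(dev, cfg_size, ctx->cfg, ctx->cfg_dma);
		return -ENOMEM;
	}
	return 0;
}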
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index b99399fe2548..fd85952b621d 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -243,128 +243,6 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } /** - * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event - * @phba: Pointer to HBA context object. - * @vport: Pointer to vport object. - * @ndlp: Pointer to FC node associated with the target. - * @lun: Lun number of the scsi device. - * @old_val: Old value of the queue depth. - * @new_val: New value of the queue depth. - * - * This function sends an event to the mgmt application indicating - * there is a change in the scsi device queue depth. - **/ -static void -lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba, - struct lpfc_vport *vport, - struct lpfc_nodelist *ndlp, - uint64_t lun, - uint32_t old_val, - uint32_t new_val) -{ - struct lpfc_fast_path_event *fast_path_evt; - unsigned long flags; - - fast_path_evt = lpfc_alloc_fast_evt(phba); - if (!fast_path_evt) - return; - - fast_path_evt->un.queue_depth_evt.scsi_event.event_type = - FC_REG_SCSI_EVENT; - fast_path_evt->un.queue_depth_evt.scsi_event.subcategory = - LPFC_EVENT_VARQUEDEPTH; - - /* Report all luns with change in queue depth */ - fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun; - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { - memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn, - &ndlp->nlp_portname, sizeof(struct lpfc_name)); - memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn, - &ndlp->nlp_nodename, sizeof(struct lpfc_name)); - } - - fast_path_evt->un.queue_depth_evt.oldval = old_val; - fast_path_evt->un.queue_depth_evt.newval = new_val; - fast_path_evt->vport = vport; - - fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; - spin_lock_irqsave(&phba->hbalock, flags); - list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list); - spin_unlock_irqrestore(&phba->hbalock, flags); - lpfc_worker_wake_up(phba); - - return; -} - -/** - * lpfc_change_queue_depth - Alter scsi device queue depth - * @sdev: Pointer the scsi device on which to change the queue depth. - * @qdepth: New queue depth to set the sdev to. - * @reason: The reason for the queue depth change. - * - * This function is called by the midlayer and the LLD to alter the queue - * depth for a scsi device. This function sets the queue depth to the new - * value and sends an event out to log the queue depth change. 
- **/ -static int -lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) -{ - struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; - struct lpfc_hba *phba = vport->phba; - struct lpfc_rport_data *rdata; - unsigned long new_queue_depth, old_queue_depth; - - old_queue_depth = sdev->queue_depth; - - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - /* change request from sysfs, fall through */ - case SCSI_QDEPTH_RAMP_UP: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - break; - case SCSI_QDEPTH_QFULL: - if (scsi_track_queue_full(sdev, qdepth) == 0) - return sdev->queue_depth; - - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0711 detected queue full - lun queue " - "depth adjusted to %d.\n", sdev->queue_depth); - break; - default: - return -EOPNOTSUPP; - } - - new_queue_depth = sdev->queue_depth; - rdata = lpfc_rport_data_from_scsi_device(sdev); - if (rdata) - lpfc_send_sdev_queuedepth_change_event(phba, vport, - rdata->pnode, sdev->lun, - old_queue_depth, - new_queue_depth); - return sdev->queue_depth; -} - -/** - * lpfc_change_queue_type() - Change a device's scsi tag queuing type - * @sdev: Pointer the scsi device whose queue depth is to change - * @tag_type: Identifier for queue tag type - */ -static int -lpfc_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; -} - -/** * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread * @phba: The Hba for which this call is being executed. * @@ -449,8 +327,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) else new_queue_depth = sdev->queue_depth - new_queue_depth; - lpfc_change_queue_depth(sdev, new_queue_depth, - SCSI_QDEPTH_DEFAULT); + scsi_change_queue_depth(sdev, new_queue_depth); } } lpfc_destroy_vport_work_array(phba, vports); @@ -4286,7 +4163,6 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); int datadir = scsi_cmnd->sc_data_direction; - char tag[2]; uint8_t *ptr; bool sli4; uint32_t fcpdl; @@ -4308,20 +4184,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len)); } - if (scsi_populate_tag_msg(scsi_cmnd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - fcp_cmnd->fcpCntl1 = HEAD_OF_Q; - break; - case ORDERED_QUEUE_TAG: - fcp_cmnd->fcpCntl1 = ORDERED_Q; - break; - default: - fcp_cmnd->fcpCntl1 = SIMPLE_Q; - break; - } - } else - fcp_cmnd->fcpCntl1 = SIMPLE_Q; + fcp_cmnd->fcpCntl1 = SIMPLE_Q; sli4 = (phba->sli_rev == LPFC_SLI_REV4); piocbq->iocb.un.fcpi.fcpi_XRdy = 0; @@ -5632,10 +5495,7 @@ lpfc_slave_configure(struct scsi_device *sdev) struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; - if (sdev->tagged_supported) - scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth); - else - scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth); + scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, @@ -6018,8 +5878,10 @@ struct scsi_host_template lpfc_template = { .shost_attrs = lpfc_hba_attrs, .max_sectors = 0xFFFF, .vendor_id = LPFC_NL_VENDOR_ID, - .change_queue_depth = 
lpfc_change_queue_depth, - .change_queue_type = lpfc_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, + .use_blk_tags = 1, + .track_queue_depth = 1, }; struct scsi_host_template lpfc_vport_template = { @@ -6041,6 +5903,8 @@ struct scsi_host_template lpfc_vport_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = lpfc_vport_attrs, .max_sectors = 0xFFFF, - .change_queue_depth = lpfc_change_queue_depth, - .change_queue_type = lpfc_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, + .use_blk_tags = 1, + .track_queue_depth = 1, }; diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 6a039eb1cbce..953fd9b953c7 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -9,69 +9,62 @@ * Generic Generic NCR5380 driver * * Copyright 1995, Russell King - * - * ALPHA RELEASE 1. - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ #include <linux/types.h> -#include <linux/stddef.h> -#include <linux/ctype.h> #include <linux/delay.h> - #include <linux/module.h> -#include <linux/signal.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <asm/hwtest.h> #include <asm/io.h> -#include <asm/irq.h> - -#include <asm/macintosh.h> #include <asm/macints.h> -#include <asm/mac_via.h> +#include <asm/setup.h> -#include "scsi.h" #include <scsi/scsi_host.h> -#include "mac_scsi.h" -/* These control the behaviour of the generic 5380 core */ -#define AUTOSENSE +/* Definitions for the core NCR5380 driver. */ + #define PSEUDO_DMA -#include "NCR5380.h" +#define NCR5380_implementation_fields unsigned char *pdma_base +#define NCR5380_local_declare() struct Scsi_Host *_instance +#define NCR5380_setup(instance) _instance = instance -#define RESET_BOOT -#define DRIVER_SETUP +#define NCR5380_read(reg) macscsi_read(_instance, reg) +#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value) -extern void via_scsi_clear(void); +#define NCR5380_pread macscsi_pread +#define NCR5380_pwrite macscsi_pwrite -#ifdef RESET_BOOT -static void mac_scsi_reset_boot(struct Scsi_Host *instance); -#endif +#define NCR5380_intr macscsi_intr +#define NCR5380_queue_command macscsi_queue_command +#define NCR5380_abort macscsi_abort +#define NCR5380_bus_reset macscsi_bus_reset +#define NCR5380_info macscsi_info +#define NCR5380_show_info macscsi_show_info +#define NCR5380_write_info macscsi_write_info + +#include "NCR5380.h" + +#define RESET_BOOT -static int setup_called = 0; static int setup_can_queue = -1; +module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; +module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; +module_param(setup_sg_tablesize, int, 0); static int setup_use_pdma = -1; -#ifdef SUPPORT_TAGS +module_param(setup_use_pdma, int, 0); static int setup_use_tagged_queuing = -1; -#endif +module_param(setup_use_tagged_queuing, int, 0); static int setup_hostid = -1; +module_param(setup_hostid, int, 0); /* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms, * we usually do 0.5s to be on the safe side. 
But Toshiba CD-ROMs once more @@ -84,232 +77,48 @@ static int setup_hostid = -1; #define AFTER_RESET_DELAY (HZ/2) #endif -static volatile unsigned char *mac_scsi_regp = NULL; -static volatile unsigned char *mac_scsi_drq = NULL; -static volatile unsigned char *mac_scsi_nodrq = NULL; - - /* * NCR 5380 register access functions */ -#if 0 -/* Debug versions */ -#define CTRL(p,v) (*ctrl = (v)) - -static char macscsi_read(struct Scsi_Host *instance, int reg) +static inline char macscsi_read(struct Scsi_Host *instance, int reg) { - int iobase = instance->io_port; - int i; - int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl; - - CTRL(iobase, 0); - i = in_8(iobase + (reg<<4)); - CTRL(iobase, 0x40); - - return i; + return in_8(instance->base + (reg << 4)); } -static void macscsi_write(struct Scsi_Host *instance, int reg, int value) -{ - int iobase = instance->io_port; - int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl; - - CTRL(iobase, 0); - out_8(iobase + (reg<<4), value); - CTRL(iobase, 0x40); -} -#else - -/* Fast versions */ -static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg) +static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value) { - return in_8(instance->io_port + (reg<<4)); + out_8(instance->base + (reg << 4), value); } -static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value) +#ifndef MODULE +static int __init mac_scsi_setup(char *str) { - out_8(instance->io_port + (reg<<4), value); -} -#endif - + int ints[7]; -/* - * Function : mac_scsi_setup(char *str) - * - * Purpose : booter command line initialization of the overrides array, - * - * Inputs : str - comma delimited list of options - * - */ + (void)get_options(str, ARRAY_SIZE(ints), ints); -static int __init mac_scsi_setup(char *str) { -#ifdef DRIVER_SETUP - int ints[7]; - - (void)get_options( str, ARRAY_SIZE(ints), ints); - - if (setup_called++ || ints[0] < 1 || ints[0] > 6) { - printk(KERN_WARNING "scsi: <mac5380>" - " Usage: mac5380=<can_queue>[,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>,<use_pdma>]\n"); - printk(KERN_ALERT "scsi: <mac5380> Bad Penguin parameters?\n"); - return 0; - } - - if (ints[0] >= 1) { - if (ints[1] > 0) - /* no limits on this, just > 0 */ - setup_can_queue = ints[1]; - } - if (ints[0] >= 2) { - if (ints[2] > 0) - setup_cmd_per_lun = ints[2]; - } - if (ints[0] >= 3) { - if (ints[3] >= 0) { - setup_sg_tablesize = ints[3]; - /* Must be <= SG_ALL (255) */ - if (setup_sg_tablesize > SG_ALL) - setup_sg_tablesize = SG_ALL; - } - } - if (ints[0] >= 4) { - /* Must be between 0 and 7 */ - if (ints[4] >= 0 && ints[4] <= 7) - setup_hostid = ints[4]; - else if (ints[4] > 7) - printk(KERN_WARNING "mac_scsi_setup: invalid host ID %d !\n", ints[4] ); - } -#ifdef SUPPORT_TAGS - if (ints[0] >= 5) { - if (ints[5] >= 0) - setup_use_tagged_queuing = !!ints[5]; + if (ints[0] < 1 || ints[0] > 6) { + pr_err("Usage: mac5380=<can_queue>[,<cmd_per_lun>[,<sg_tablesize>[,<hostid>[,<use_tags>[,<use_pdma>]]]]]\n"); + return 0; } - - if (ints[0] == 6) { - if (ints[6] >= 0) + if (ints[0] >= 1) + setup_can_queue = ints[1]; + if (ints[0] >= 2) + setup_cmd_per_lun = ints[2]; + if (ints[0] >= 3) + setup_sg_tablesize = ints[3]; + if (ints[0] >= 4) + setup_hostid = ints[4]; + if (ints[0] >= 5) + setup_use_tagged_queuing = ints[5]; + if (ints[0] >= 6) setup_use_pdma = ints[6]; - } -#else - if (ints[0] == 5) { - if (ints[5] >= 0) - setup_use_pdma = ints[5]; - } -#endif /* SUPPORT_TAGS */ - -#endif /* DRIVER_SETUP */ return 1; } 
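In the rewritten mac_scsi above, the setup_called guard and the DRIVER_SETUP conditional are gone: each setup_* variable is now exposed with module_param(), and for built-in kernels the mac5380= boot option is parsed by the mac_scsi_setup() handler just shown, registered with __setup() right after this sketch. A standalone sketch of that dual module-parameter/boot-option pattern, with an invented myopt= option and variables, would look roughly like this:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/*
 * Invented example: one pair of knobs reachable both as module parameters
 * and, when built in, via a "myopt=<a>[,<b>]" boot argument.
 */
static int opt_a = -1;
module_param(opt_a, int, 0);
static int opt_b = -1;
module_param(opt_b, int, 0);

#ifndef MODULE
static int __init myopt_setup(char *str)
{
	int ints[3];	/* ints[0] holds how many values were parsed */

	(void)get_options(str, ARRAY_SIZE(ints), ints);

	if (ints[0] < 1 || ints[0] > 2) {
		pr_err("Usage: myopt=<a>[,<b>]\n");
		return 0;
	}
	if (ints[0] >= 1)
		opt_a = ints[1];
	if (ints[0] >= 2)
		opt_b = ints[2];
	return 1;
}

__setup("myopt=", myopt_setup);
#endif /* !MODULE */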
__setup("mac5380=", mac_scsi_setup); - -/* - * Function : int macscsi_detect(struct scsi_host_template * tpnt) - * - * Purpose : initializes mac NCR5380 driver based on the - * command line / compile time port and irq definitions. - * - * Inputs : tpnt - template for this SCSI adapter. - * - * Returns : 1 if a host adapter was found, 0 if not. - * - */ - -int __init macscsi_detect(struct scsi_host_template * tpnt) -{ - static int called = 0; - int flags = 0; - struct Scsi_Host *instance; - - if (!MACH_IS_MAC || called) - return( 0 ); - - if (macintosh_config->scsi_type != MAC_SCSI_OLD) - return( 0 ); - - /* setup variables */ - tpnt->can_queue = - (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; - tpnt->cmd_per_lun = - (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; - tpnt->sg_tablesize = - (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; - - if (setup_hostid >= 0) - tpnt->this_id = setup_hostid; - else { - /* use 7 as default */ - tpnt->this_id = 7; - } - -#ifdef SUPPORT_TAGS - if (setup_use_tagged_queuing < 0) - setup_use_tagged_queuing = USE_TAGGED_QUEUING; -#endif - - /* Once we support multiple 5380s (e.g. DuoDock) we'll do - something different here */ - instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); - if (instance == NULL) - return 0; - - if (macintosh_config->ident == MAC_MODEL_IIFX) { - mac_scsi_regp = via1+0x8000; - mac_scsi_drq = via1+0xE000; - mac_scsi_nodrq = via1+0xC000; - /* The IIFX should be able to do true DMA, but pseudo-dma doesn't work */ - flags = FLAG_NO_PSEUDO_DMA; - } else { - mac_scsi_regp = via1+0x10000; - mac_scsi_drq = via1+0x6000; - mac_scsi_nodrq = via1+0x12000; - } - - if (! setup_use_pdma) - flags = FLAG_NO_PSEUDO_DMA; - - instance->io_port = (unsigned long) mac_scsi_regp; - instance->irq = IRQ_MAC_SCSI; - -#ifdef RESET_BOOT - mac_scsi_reset_boot(instance); -#endif - - NCR5380_init(instance, flags); - - instance->n_io_port = 255; - - ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; - - if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) { - printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n", - instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; - } - - printk(KERN_INFO "scsi%d: generic 5380 at port %lX irq", instance->host_no, instance->io_port); - if (instance->irq == SCSI_IRQ_NONE) - printk (KERN_INFO "s disabled"); - else - printk (KERN_INFO " %d", instance->irq); - printk(KERN_INFO " options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - instance->can_queue, instance->cmd_per_lun, MACSCSI_PUBLIC_RELEASE); - printk(KERN_INFO "\nscsi%d:", instance->host_no); - NCR5380_print_options(instance); - printk("\n"); - called = 1; - return 1; -} - -int macscsi_release (struct Scsi_Host *shpnt) -{ - if (shpnt->irq != SCSI_IRQ_NONE) - free_irq(shpnt->irq, shpnt); - NCR5380_exit(shpnt); - - return 0; -} +#endif /* !MODULE */ #ifdef RESET_BOOT /* @@ -349,10 +158,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance) } #endif -const char * macscsi_info (struct Scsi_Host *spnt) { - return ""; -} - +#ifdef PSEUDO_DMA /* Pseudo-DMA: (Ove Edlund) The code attempts to catch bus errors that occur if one for example @@ -422,38 +228,39 @@ __asm__ __volatile__ \ : "0"(s), "1"(d), "2"(len) \ : "d0") - -static int macscsi_pread (struct Scsi_Host *instance, - unsigned char *dst, int len) +static int macscsi_pread(struct Scsi_Host *instance, + unsigned char *dst, int len) { - unsigned char *d; - volatile unsigned char *s; - - 
NCR5380_local_declare(); - NCR5380_setup(instance); - - s = mac_scsi_drq+0x60; - d = dst; - -/* These conditions are derived from MacOS */ - - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) - && !(NCR5380_read(STATUS_REG) & SR_REQ)) - ; - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) - && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { - printk(KERN_ERR "Error in macscsi_pread\n"); - return -1; - } - - CP_IO_TO_MEM(s, d, len); - - if (len != 0) { - printk(KERN_NOTICE "Bus error in macscsi_pread\n"); - return -1; - } - - return 0; + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char *d; + unsigned char *s; + + NCR5380_local_declare(); + NCR5380_setup(instance); + + s = hostdata->pdma_base + (INPUT_DATA_REG << 4); + d = dst; + + /* These conditions are derived from MacOS */ + + while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && + !(NCR5380_read(STATUS_REG) & SR_REQ)) + ; + + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { + pr_err("Error in macscsi_pread\n"); + return -1; + } + + CP_IO_TO_MEM(s, d, len); + + if (len != 0) { + pr_notice("Bus error in macscsi_pread\n"); + return -1; + } + + return 0; } @@ -515,59 +322,172 @@ __asm__ __volatile__ \ : "0"(s), "1"(d), "2"(len) \ : "d0") -static int macscsi_pwrite (struct Scsi_Host *instance, - unsigned char *src, int len) +static int macscsi_pwrite(struct Scsi_Host *instance, + unsigned char *src, int len) { - unsigned char *s; - volatile unsigned char *d; - - NCR5380_local_declare(); - NCR5380_setup(instance); - - s = src; - d = mac_scsi_drq; - -/* These conditions are derived from MacOS */ - - while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) - && (!(NCR5380_read(STATUS_REG) & SR_REQ) - || (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) - ; - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { - printk(KERN_ERR "Error in macscsi_pwrite\n"); - return -1; - } - - CP_MEM_TO_IO(s, d, len); - - if (len != 0) { - printk(KERN_NOTICE "Bus error in macscsi_pwrite\n"); - return -1; - } - - return 0; -} + struct NCR5380_hostdata *hostdata = shost_priv(instance); + unsigned char *s; + unsigned char *d; + + NCR5380_local_declare(); + NCR5380_setup(instance); + + s = src; + d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4); + + /* These conditions are derived from MacOS */ + + while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && + (!(NCR5380_read(STATUS_REG) & SR_REQ) || + (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) + ; + + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { + pr_err("Error in macscsi_pwrite\n"); + return -1; + } + + CP_MEM_TO_IO(s, d, len); + + if (len != 0) { + pr_notice("Bus error in macscsi_pwrite\n"); + return -1; + } + return 0; +} +#endif #include "NCR5380.c" -static struct scsi_host_template driver_template = { - .proc_name = "Mac5380", +#define DRV_MODULE_NAME "mac_scsi" +#define PFX DRV_MODULE_NAME ": " + +static struct scsi_host_template mac_scsi_template = { + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, .show_info = macscsi_show_info, .write_info = macscsi_write_info, .name = "Macintosh NCR5380 SCSI", - .detect = macscsi_detect, - .release = macscsi_release, .info = macscsi_info, .queuecommand = macscsi_queue_command, .eh_abort_handler = macscsi_abort, .eh_bus_reset_handler = macscsi_bus_reset, - .can_queue = CAN_QUEUE, + .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, - .cmd_per_lun = CMD_PER_LUN, + .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING }; +static int 
__init mac_scsi_probe(struct platform_device *pdev) +{ + struct Scsi_Host *instance; + int error; + int host_flags = 0; + struct resource *irq, *pio_mem, *pdma_mem = NULL; + + pio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!pio_mem) + return -ENODEV; + +#ifdef PSEUDO_DMA + pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); +#endif + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + + if (!hwreg_present((unsigned char *)pio_mem->start + + (STATUS_REG << 4))) { + pr_info(PFX "no device detected at %pap\n", &pio_mem->start); + return -ENODEV; + } + + if (setup_can_queue > 0) + mac_scsi_template.can_queue = setup_can_queue; + if (setup_cmd_per_lun > 0) + mac_scsi_template.cmd_per_lun = setup_cmd_per_lun; + if (setup_sg_tablesize >= 0) + mac_scsi_template.sg_tablesize = setup_sg_tablesize; + if (setup_hostid >= 0) + mac_scsi_template.this_id = setup_hostid & 7; + if (setup_use_pdma < 0) + setup_use_pdma = 0; + + instance = scsi_host_alloc(&mac_scsi_template, + sizeof(struct NCR5380_hostdata)); + if (!instance) + return -ENOMEM; + + instance->base = pio_mem->start; + if (irq) + instance->irq = irq->start; + else + instance->irq = NO_IRQ; + + if (pdma_mem && setup_use_pdma) { + struct NCR5380_hostdata *hostdata = shost_priv(instance); + + hostdata->pdma_base = (unsigned char *)pdma_mem->start; + } else + host_flags |= FLAG_NO_PSEUDO_DMA; + +#ifdef RESET_BOOT + mac_scsi_reset_boot(instance); +#endif + +#ifdef SUPPORT_TAGS + host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; +#endif + + NCR5380_init(instance, host_flags); + + if (instance->irq != NO_IRQ) { + error = request_irq(instance->irq, macscsi_intr, IRQF_SHARED, + "NCR5380", instance); + if (error) + goto fail_irq; + } + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); +fail_irq: + NCR5380_exit(instance); + scsi_host_put(instance); + return error; +} + +static int __exit mac_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + + scsi_remove_host(instance); + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); + NCR5380_exit(instance); + scsi_host_put(instance); + return 0; +} + +static struct platform_driver mac_scsi_driver = { + .remove = __exit_p(mac_scsi_remove), + .driver = { + .name = DRV_MODULE_NAME, + .owner = THIS_MODULE, + }, +}; + +module_platform_driver_probe(mac_scsi_driver, mac_scsi_probe); -#include "scsi_module.c" +MODULE_ALIAS("platform:" DRV_MODULE_NAME); +MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h deleted file mode 100644 index 06969b06e54b..000000000000 --- a/drivers/scsi/mac_scsi.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Cumana Generic NCR5380 driver defines - * - * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 440-4894 - * - * ALPHA RELEASE 1. 
- * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 - */ - -#ifndef MAC_NCR5380_H -#define MAC_NCR5380_H - -#define MACSCSI_PUBLIC_RELEASE 2 - -#ifndef ASM - -#ifndef CMD_PER_LUN -#define CMD_PER_LUN 2 -#endif - -#ifndef CAN_QUEUE -#define CAN_QUEUE 16 -#endif - -#ifndef SG_TABLESIZE -#define SG_TABLESIZE SG_NONE -#endif - -#ifndef USE_TAGGED_QUEUING -#define USE_TAGGED_QUEUING 0 -#endif - -#include <scsi/scsicam.h> - -#define NCR5380_implementation_fields \ - int port, ctrl - -#define NCR5380_local_declare() \ - struct Scsi_Host *_instance - -#define NCR5380_setup(instance) \ - _instance = instance - -#define NCR5380_read(reg) macscsi_read(_instance, reg) -#define NCR5380_write(reg, value) macscsi_write(_instance, reg, value) - -#define NCR5380_pread macscsi_pread -#define NCR5380_pwrite macscsi_pwrite - -#define NCR5380_intr macscsi_intr -#define NCR5380_queue_command macscsi_queue_command -#define NCR5380_abort macscsi_abort -#define NCR5380_bus_reset macscsi_bus_reset -#define NCR5380_show_info macscsi_show_info -#define NCR5380_write_info macscsi_write_info - -#endif /* ndef ASM */ -#endif /* MAC_NCR5380_H */ - diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index ac5d94cfd52f..2485255f3414 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -1945,7 +1945,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) cmd->device->id, (u32)cmd->device->lun); if(list_empty(&adapter->pending_list)) - return FALSE; + return FAILED; list_for_each_safe(pos, next, &adapter->pending_list) { @@ -1968,7 +1968,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) (aor==SCB_ABORT) ? 
"ABORTING":"RESET", scb->idx); - return FALSE; + return FAILED; } else { @@ -1993,12 +1993,12 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); - return TRUE; + return SUCCESS; } } } - return FALSE; + return FAILED; } static inline int diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 531dce419c18..f0987f22ea70 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -332,27 +332,6 @@ static struct device_attribute *megaraid_sdev_attrs[] = { NULL, }; -/** - * megaraid_change_queue_depth - Change the device's queue depth - * @sdev: scsi device struct - * @qdepth: depth to set - * @reason: calling context - * - * Return value: - * actual depth set - */ -static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason) -{ - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - - if (qdepth > MBOX_MAX_SCSI_CMDS) - qdepth = MBOX_MAX_SCSI_CMDS; - scsi_adjust_queue_depth(sdev, 0, qdepth); - return sdev->queue_depth; -} - /* * Scsi host template for megaraid unified driver */ @@ -365,7 +344,7 @@ static struct scsi_host_template megaraid_template_g = { .eh_device_reset_handler = megaraid_reset_handler, .eh_bus_reset_handler = megaraid_reset_handler, .eh_host_reset_handler = megaraid_reset_handler, - .change_queue_depth = megaraid_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .use_clustering = ENABLE_CLUSTERING, .no_write_same = 1, .sdev_attrs = megaraid_sdev_attrs, diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index a49914de4b95..0d44d91c2fce 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2003-2012 LSI Corporation. + * Copyright (c) 2003-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,17 +15,18 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see <http://www.gnu.org/licenses/>. * * FILE: megaraid_sas.h * - * Authors: LSI Corporation + * Authors: Avago Technologies + * Kashyap Desai <kashyap.desai@avagotech.com> + * Sumit Saxena <sumit.saxena@avagotech.com> * - * Send feedback to: <megaraidlinux@lsi.com> + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #ifndef LSI_MEGARAID_SAS_H @@ -33,9 +35,7 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.805.06.00-rc1" -#define MEGASAS_RELDATE "Sep. 4, 2014" -#define MEGASAS_EXT_VERSION "Thu. Sep. 
4 17:00:00 PDT 2014" +#define MEGASAS_VERSION "06.805.06.01-rc1" /* * Device IDs @@ -1931,8 +1931,7 @@ u16 get_updated_dev_handle(struct megasas_instance *instance, struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); -int megasas_get_ctrl_info(struct megasas_instance *instance, - struct megasas_ctrl_info *ctrl_info); +int megasas_get_ctrl_info(struct megasas_instance *instance); int megasas_set_crash_dump_params(struct megasas_instance *instance, u8 crash_buf_state); void megasas_free_host_crash_buffer(struct megasas_instance *instance); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5640ad1c8214..f05580e693d0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2003-2012 LSI Corporation. + * Copyright (c) 2003-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,22 +15,20 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see <http://www.gnu.org/licenses/>. * - * FILE: megaraid_sas_base.c - * Version : 06.805.06.00-rc1 - * - * Authors: LSI Corporation + * Authors: Avago Technologies * Sreenivas Bagalkote * Sumant Patro * Bo Yang - * Adam Radford <linuxraid@lsi.com> + * Adam Radford + * Kashyap Desai <kashyap.desai@avagotech.com> + * Sumit Saxena <sumit.saxena@avagotech.com> * - * Send feedback to: <megaraidlinux@lsi.com> + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #include <linux/kernel.h> @@ -1008,7 +1007,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; - cmd->cmd_status = 0xFF; + cmd->cmd_status = ENODATA; instance->instancet->issue_dcmd(instance, cmd); @@ -1572,6 +1571,12 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) instance = (struct megasas_instance *) scmd->device->host->hostdata; + if (instance->unload == 1) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + } + if (instance->issuepend_done == 0) return SCSI_MLQUEUE_HOST_BUSY; @@ -2586,20 +2591,6 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) } } -static int megasas_change_queue_depth(struct scsi_device *sdev, - int queue_depth, int reason) -{ - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - - if (queue_depth > sdev->host->can_queue) - queue_depth = sdev->host->can_queue; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), - queue_depth); - - return queue_depth; -} - static ssize_t megasas_fw_crash_buffer_store(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) @@ -2764,7 +2755,7 @@ static struct scsi_host_template megasas_template = { .shost_attrs = megaraid_host_attrs, .bios_param = 
megasas_bios_param, .use_clustering = ENABLE_CLUSTERING, - .change_queue_depth = megasas_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .no_write_same = 1, }; @@ -4028,25 +4019,83 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) return ret; } +/* + * megasas_update_ext_vd_details : Update details w.r.t Extended VD + * instance : Controller's instance +*/ +static void megasas_update_ext_vd_details(struct megasas_instance *instance) +{ + struct fusion_context *fusion; + u32 old_map_sz; + u32 new_map_sz; + + fusion = instance->ctrl_context; + /* For MFI based controllers return dummy success */ + if (!fusion) + return; + + instance->supportmax256vd = + instance->ctrl_info->adapterOperations3.supportMaxExtLDs; + /* Below is additional check to address future FW enhancement */ + if (instance->ctrl_info->max_lds > 64) + instance->supportmax256vd = 1; + + instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS + * MEGASAS_MAX_DEV_PER_CHANNEL; + instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS + * MEGASAS_MAX_DEV_PER_CHANNEL; + if (instance->supportmax256vd) { + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; + } else { + instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; + instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; + } + dev_info(&instance->pdev->dev, "Firmware supports %d VD %d PD\n", + instance->fw_supported_vd_count, + instance->fw_supported_pd_count); + dev_info(&instance->pdev->dev, "Driver supports %d VD %d PD\n", + instance->drv_supported_vd_count, + instance->drv_supported_pd_count); + + old_map_sz = sizeof(struct MR_FW_RAID_MAP) + + (sizeof(struct MR_LD_SPAN_MAP) * + (instance->fw_supported_vd_count - 1)); + new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT); + fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) + + (sizeof(struct MR_LD_SPAN_MAP) * + (instance->drv_supported_vd_count - 1)); + + fusion->max_map_sz = max(old_map_sz, new_map_sz); + + + if (instance->supportmax256vd) + fusion->current_map_sz = new_map_sz; + else + fusion->current_map_sz = old_map_sz; + +} + /** * megasas_get_controller_info - Returns FW's controller structure * @instance: Adapter soft state - * @ctrl_info: Controller information structure * * Issues an internal command (DCMD) to get the FW's controller structure. * This information is mainly used to find out the maximum IO transfer per * command supported by the FW. 
*/ int -megasas_get_ctrl_info(struct megasas_instance *instance, - struct megasas_ctrl_info *ctrl_info) +megasas_get_ctrl_info(struct megasas_instance *instance) { int ret = 0; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct megasas_ctrl_info *ci; + struct megasas_ctrl_info *ctrl_info; dma_addr_t ci_h = 0; + ctrl_info = instance->ctrl_info; + cmd = megasas_get_cmd(instance); if (!cmd) { @@ -4086,8 +4135,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance, else ret = megasas_issue_polled(instance, cmd); - if (!ret) + if (!ret) { memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); + le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); + le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); + le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); + megasas_update_ext_vd_details(instance); + } pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), ci, ci_h); @@ -4289,7 +4343,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance) if (megasas_issue_init_mfi(instance)) goto fail_fw_init; - if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { + if (megasas_get_ctrl_info(instance)) { dev_err(&instance->pdev->dev, "(%d): Could get controller info " "Fail from %s %d\n", instance->unique_id, __func__, __LINE__); @@ -4527,12 +4581,8 @@ static int megasas_init_fw(struct megasas_instance *instance) dev_info(&instance->pdev->dev, "Controller type: iMR\n"); } - /* OnOffProperties are converted into CPU arch*/ - le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; - /* adapterOperations2 are converted into CPU arch*/ - le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); instance->mpio = ctrl_info->adapterOperations2.mpio; instance->UnevenSpanSupport = ctrl_info->adapterOperations2.supportUnevenSpans; @@ -4562,7 +4612,6 @@ static int megasas_init_fw(struct megasas_instance *instance) "requestorId %d\n", instance->requestorId); } - le32_to_cpus((u32 *)&ctrl_info->adapterOperations3); instance->crash_dump_fw_support = ctrl_info->adapterOperations3.supportCrashDump; instance->crash_dump_drv_support = @@ -4587,8 +4636,6 @@ static int megasas_init_fw(struct megasas_instance *instance) if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) instance->max_sectors_per_req = tmp_sectors; - kfree(ctrl_info); - /* Check for valid throttlequeuedepth module parameter */ if (instance->is_imr) { if (throttlequeuedepth > (instance->max_fw_cmds - @@ -4957,10 +5004,6 @@ static int megasas_io_attach(struct megasas_instance *instance) return -ENODEV; } - /* - * Trigger SCSI to scan our drives - */ - scsi_scan_host(host); return 0; } @@ -5083,6 +5126,8 @@ static int megasas_probe_one(struct pci_dev *pdev, goto fail_alloc_dma_buf; } fusion = instance->ctrl_context; + memset(fusion, 0, + ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); INIT_LIST_HEAD(&fusion->cmd_pool); spin_lock_init(&fusion->mpt_pool_lock); memset(fusion->load_balance_info, 0, @@ -5288,6 +5333,10 @@ retry_irq_register: goto fail_io_attach; instance->unload = 0; + /* + * Trigger SCSI to scan our drives + */ + scsi_scan_host(host); /* * Initiate AEN (Asynchronous Event Notification) @@ -6051,6 +6100,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, megasas_issue_blocked_cmd(instance, cmd, 0); cmd->sync_cmd = 0; + if (instance->unload == 1) { + dev_info(&instance->pdev->dev, "Driver unload is in progress " + "don't submit data to application\n"); + goto out; + } /* 
* copy out the kernel buffers to user buffers */ @@ -6400,16 +6454,6 @@ static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); static ssize_t -megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) -{ - return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", - MEGASAS_RELDATE); -} - -static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, - NULL); - -static ssize_t megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) { return sprintf(buf, "%u\n", support_poll_for_event); @@ -6712,8 +6756,7 @@ static int __init megasas_init(void) /* * Announce driver version and other information */ - printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, - MEGASAS_EXT_VERSION); + pr_info("megasas: %s\n", MEGASAS_VERSION); spin_lock_init(&poll_aen_lock); @@ -6748,10 +6791,6 @@ static int __init megasas_init(void) &driver_attr_version); if (rval) goto err_dcf_attr_ver; - rval = driver_create_file(&megasas_pci_driver.driver, - &driver_attr_release_date); - if (rval) - goto err_dcf_rel_date; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); @@ -6775,12 +6814,7 @@ err_dcf_support_device_change: err_dcf_dbg_lvl: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); - err_dcf_support_poll_for_event: - driver_remove_file(&megasas_pci_driver.driver, - &driver_attr_release_date); - -err_dcf_rel_date: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); err_dcf_attr_ver: pci_unregister_driver(&megasas_pci_driver); @@ -6800,8 +6834,6 @@ static void __exit megasas_exit(void) &driver_attr_support_poll_for_event); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); - driver_remove_file(&megasas_pci_driver.driver, - &driver_attr_release_date); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); pci_unregister_driver(&megasas_pci_driver); diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 685e6f391fe4..460c6a3d4ade 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2009-2012 LSI Corporation. + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,20 +15,21 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
* * FILE: megaraid_sas_fp.c * - * Authors: LSI Corporation + * Authors: Avago Technologies * Sumant Patro * Varad Talamacki * Manoj Jose + * Kashyap Desai <kashyap.desai@avagotech.com> + * Sumit Saxena <sumit.saxena@avagotech.com> * - * Send feedback to: <megaraidlinux@lsi.com> + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #include <linux/kernel.h> @@ -183,14 +185,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) /* New Raid map will not set totalSize, so keep expected value * for legacy code in ValidateMapInfo */ - pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT); + pDrvRaidMap->totalSize = + cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT)); } else { fw_map_old = (struct MR_FW_RAID_MAP_ALL *) fusion->ld_map[(instance->map_id & 1)]; pFwRaidMap = &fw_map_old->raidMap; #if VD_EXT_DEBUG - for (i = 0; i < pFwRaidMap->ldCount; i++) { + for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) { dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", instance->unique_id, i, @@ -202,12 +205,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) memset(drv_map, 0, fusion->drv_map_sz); pDrvRaidMap->totalSize = pFwRaidMap->totalSize; - pDrvRaidMap->ldCount = pFwRaidMap->ldCount; + pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount; pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) pDrvRaidMap->ldTgtIdToLd[i] = (u8)pFwRaidMap->ldTgtIdToLd[i]; - for (i = 0; i < pDrvRaidMap->ldCount; i++) { + for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) { pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; #if VD_EXT_DEBUG dev_dbg(&instance->pdev->dev, @@ -268,7 +271,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) else expected_size = (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount))); + (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount))); if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) { dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n", @@ -284,7 +287,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) mr_update_load_balance_params(drv_map, lbInfo); - num_lds = le32_to_cpu(drv_map->raidMap.ldCount); + num_lds = le16_to_cpu(drv_map->raidMap.ldCount); /*Convert Raid capability values to CPU arch */ for (ldCount = 0; ldCount < num_lds; ldCount++) { @@ -457,7 +460,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance, quad = &map->raidMap.ldSpanMap[ld]. spanBlock[span]. block_span_info.quad[info]; - if (le32_to_cpu(quad->diff == 0)) + if (le32_to_cpu(quad->diff) == 0) return SPAN_INVALID; if (le64_to_cpu(quad->logStart) <= row && row <= le64_to_cpu(quad->logEnd) && @@ -520,7 +523,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance, span_set->span_row_data_width) * span_set->diff; for (span = 0, span_offset = 0; span < raid->spanDepth; span++) if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
- block_span_info.noElements >= info+1)) { + block_span_info.noElements) >= info+1) { if (strip_offset >= span_set->strip_offset[span]) span_offset++; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f37eed682c75..71557f64bb5e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2009-2012 LSI Corporation. + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,19 +15,20 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see <http://www.gnu.org/licenses/>. * * FILE: megaraid_sas_fusion.c * - * Authors: LSI Corporation + * Authors: Avago Technologies * Sumant Patro - * Adam Radford <linuxraid@lsi.com> + * Adam Radford + * Kashyap Desai <kashyap.desai@avagotech.com> + * Sumit Saxena <sumit.saxena@avagotech.com> * - * Send feedback to: <megaraidlinux@lsi.com> + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #include <linux/kernel.h> @@ -880,7 +882,7 @@ megasas_sync_map_info(struct megasas_instance *instance) map = fusion->ld_drv_map[instance->map_id & 1]; - num_lds = le32_to_cpu(map->raidMap.ldCount); + num_lds = le16_to_cpu(map->raidMap.ldCount); dcmd = &cmd->frame->dcmd; @@ -1065,48 +1067,16 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) goto fail_ioc_init; megasas_display_intel_branding(instance); - if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { + if (megasas_get_ctrl_info(instance)) { dev_err(&instance->pdev->dev, "Could not get controller info. 
Fail from %s %d\n", __func__, __LINE__); goto fail_ioc_init; } - instance->supportmax256vd = - instance->ctrl_info->adapterOperations3.supportMaxExtLDs; - /* Below is additional check to address future FW enhancement */ - if (instance->ctrl_info->max_lds > 64) - instance->supportmax256vd = 1; - instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS - * MEGASAS_MAX_DEV_PER_CHANNEL; - instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS - * MEGASAS_MAX_DEV_PER_CHANNEL; - if (instance->supportmax256vd) { - instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; - instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; - } else { - instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; - instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; - } - dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n" - "Driver supports %d VDs %d PDs\n", - instance->fw_supported_vd_count, - instance->fw_supported_pd_count, - instance->drv_supported_vd_count, - instance->drv_supported_pd_count); - instance->flag_ieee = 1; fusion->fast_path_io = 0; - fusion->old_map_sz = - sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) * - (instance->fw_supported_vd_count - 1)); - fusion->new_map_sz = - sizeof(struct MR_FW_RAID_MAP_EXT); - fusion->drv_map_sz = - sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) * - (instance->drv_supported_vd_count - 1)); - fusion->drv_map_pages = get_order(fusion->drv_map_sz); for (i = 0; i < 2; i++) { fusion->ld_map[i] = NULL; @@ -1121,16 +1091,10 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) fusion->drv_map_pages); goto fail_ioc_init; } + memset(fusion->ld_drv_map[i], 0, + ((1 << PAGE_SHIFT) << fusion->drv_map_pages)); } - fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz); - - if (instance->supportmax256vd) - fusion->current_map_sz = fusion->new_map_sz; - else - fusion->current_map_sz = fusion->old_map_sz; - - for (i = 0; i < 2; i++) { fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, fusion->max_map_sz, @@ -1173,9 +1137,10 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { #if defined(writeq) && defined(CONFIG_64BIT) - u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo); + u64 req_data = (((u64)le32_to_cpu(req_desc_hi) << 32) | + le32_to_cpu(req_desc_lo)); - writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port); + writeq(req_data, &(regs)->inbound_low_queue_port); #else unsigned long flags; @@ -1373,7 +1338,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, /* Logical block reference tag */ io_request->CDB.EEDP32.PrimaryReferenceTag = cpu_to_be32(ref_tag); - io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; + io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff); io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ /* Transfer length */ @@ -1769,7 +1734,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, /* set RAID context values */ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; - pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; + pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd); pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); pRAID_Context->regLockRowLBA = 0; pRAID_Context->regLockLength = 0; @@ -2254,7 +2219,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, * megasas_complete_cmd */ - if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) + if (frame_hdr->flags & 
cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)) cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; fusion = instance->ctrl_context; @@ -2385,6 +2350,8 @@ megasas_alloc_host_crash_buffer(struct megasas_instance *instance) "memory allocation failed at index %d\n", i); break; } + memset(instance->crash_buf[i], 0, + ((1 << PAGE_SHIFT) << instance->crash_buf_pages)); } instance->drv_buf_alloc = i; } @@ -2837,11 +2804,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) } } - clear_bit(MEGASAS_FUSION_IN_RESET, - &instance->reset_flags); - instance->instancet->enable_intr(instance); - instance->adprecovery = MEGASAS_HBA_OPERATIONAL; - + if (megasas_get_ctrl_info(instance)) { + dev_info(&instance->pdev->dev, + "Failed from %s %d\n", + __func__, __LINE__); + instance->adprecovery = + MEGASAS_HW_CRITICAL_ERROR; + megaraid_sas_kill_hba(instance); + retval = FAILED; + } /* Reset load balance info */ memset(fusion->load_balance_info, 0, sizeof(struct LD_LOAD_BALANCE_INFO) @@ -2850,6 +2821,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); + clear_bit(MEGASAS_FUSION_IN_RESET, + &instance->reset_flags); + instance->instancet->enable_intr(instance); + instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + /* Restart SR-IOV heartbeat */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 0)) @@ -2866,14 +2842,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) "successful for scsi%d.\n", instance->host->host_no); - if (instance->crash_dump_drv_support) { - if (instance->crash_dump_app_support) - megasas_set_crash_dump_params(instance, - MR_CRASH_BUF_TURN_ON); - else - megasas_set_crash_dump_params(instance, - MR_CRASH_BUF_TURN_OFF); - } + if (instance->crash_dump_drv_support && + instance->crash_dump_app_support) + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_ON); + else + megasas_set_crash_dump_params(instance, + MR_CRASH_BUF_TURN_OFF); + retval = SUCCESS; goto out; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 0d183d521bdd..5ab7daee11be 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -1,7 +1,8 @@ /* * Linux MegaRAID driver for SAS based RAID controllers * - * Copyright (c) 2009-2012 LSI Corporation. + * Copyright (c) 2009-2013 LSI Corporation + * Copyright (c) 2013-2014 Avago Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,19 +15,20 @@ * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
* * FILE: megaraid_sas_fusion.h * - * Authors: LSI Corporation + * Authors: Avago Technologies * Manoj Jose * Sumant Patro + * Kashyap Desai <kashyap.desai@avagotech.com> + * Sumit Saxena <sumit.saxena@avagotech.com> * - * Send feedback to: <megaraidlinux@lsi.com> + * Send feedback to: megaraidlinux.pdl@avagotech.com * - * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 - * ATTN: Linuxraid + * Mail to: Avago Technologies, 350 West Trimble Road, Building 90, + * San Jose, California 95131 */ #ifndef _MEGARAID_SAS_FUSION_H_ @@ -834,8 +836,6 @@ struct fusion_context { u32 max_map_sz; u32 current_map_sz; - u32 old_map_sz; - u32 new_map_sz; u32 drv_map_sz; u32 drv_map_pages; u8 fast_path_io; diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c80ed0482649..8431eb10bbb1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -1179,15 +1179,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, } /** - * _scsih_adjust_queue_depth - setting device queue depth + * _scsih_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth * - * - * Returns nothing + * Returns queue depth. */ -static void -_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) +static int +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth; @@ -1217,63 +1216,11 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) spin_unlock_irqrestore(&ioc->sas_device_lock, flags); not_sata: - if (!sdev->tagged_supported) max_depth = 1; if (qdepth > max_depth) qdepth = max_depth; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); -} - -/** - * _scsih_change_queue_depth - setting device queue depth - * @sdev: scsi device struct - * @qdepth: requested queue depth - * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP - * (see include/scsi/scsi_host.h for definition) - * - * Returns queue depth. - */ -static int -_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) -{ - if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) - _scsih_adjust_queue_depth(sdev, qdepth); - else if (reason == SCSI_QDEPTH_QFULL) - scsi_track_queue_full(sdev, qdepth); - else - return -EOPNOTSUPP; - - if (sdev->inquiry_len > 7) - sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " - "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", - sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags, - sdev->ordered_tags, sdev->scsi_level, - (sdev->inquiry[7] & 2) >> 1); - - return sdev->queue_depth; -} - -/** - * _scsih_change_queue_type - changing device queue tag type - * @sdev: scsi device struct - * @tag_type: requested tag type - * - * Returns queue tag type. 
- */ -static int -_scsih_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; + return scsi_change_queue_depth(sdev, qdepth); } /** @@ -2104,7 +2051,7 @@ _scsih_slave_configure(struct scsi_device *sdev) r_level, raid_device->handle, (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); - _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); + _scsih_change_queue_depth(sdev, qdepth); /* raid transport support */ if (!ioc->is_warpdrive) _scsih_set_level(sdev, raid_device->volume_type); @@ -2169,7 +2116,7 @@ _scsih_slave_configure(struct scsi_device *sdev) _scsih_display_sata_capabilities(ioc, handle, sdev); - _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); + _scsih_change_queue_depth(sdev, qdepth); if (ssp_target) { sas_read_port_mode_page(sdev); @@ -3966,16 +3913,8 @@ _scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; /* set tags */ - if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) { - if (scmd->device->tagged_supported) { - if (scmd->device->ordered_tags) - mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; - else - mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; - } else - mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; - } else - mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + /* Make sure Device is not raid volume. * We do not expose raid functionality to upper layer for warpdrive. */ @@ -7653,7 +7592,7 @@ static struct scsi_host_template scsih_driver_template = { .scan_finished = _scsih_scan_finished, .scan_start = _scsih_scan_start, .change_queue_depth = _scsih_change_queue_depth, - .change_queue_type = _scsih_change_queue_type, + .change_queue_type = scsi_change_queue_type, .eh_abort_handler = _scsih_abort, .eh_device_reset_handler = _scsih_dev_reset, .eh_target_reset_handler = _scsih_target_reset, @@ -7667,6 +7606,7 @@ static struct scsi_host_template scsih_driver_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mpt2sas_host_attrs, .sdev_attrs = mpt2sas_dev_attrs, + .track_queue_depth = 1, }; /** diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 857276b8880f..a2b60991efd4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1053,9 +1053,15 @@ _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id, return found; } - -static void -_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) +/** + * _scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Returns queue depth. 
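 * (Illustrative aside: under the reworked midlayer API the change_queue_depth
 * hook takes just (sdev, qdepth), clamps the request to whatever the adapter
 * can sustain, and hands the bookkeeping to the SCSI core. A minimal sketch,
 * with MY_MAX_CMDS standing in for a hypothetical driver-specific limit:
 *
 *	static int my_change_queue_depth(struct scsi_device *sdev, int qdepth)
 *	{
 *		if (qdepth > MY_MAX_CMDS)	/* placeholder limit, not from this patch */
 *			qdepth = MY_MAX_CMDS;
 *		return scsi_change_queue_depth(sdev, qdepth);
 *	}
 *
 * Drivers that need no clamping of their own can point .change_queue_depth
 * directly at scsi_change_queue_depth, as the megaraid conversions above do.)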
+ */ +static int +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth; @@ -1090,62 +1096,10 @@ _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) max_depth = 1; if (qdepth > max_depth) qdepth = max_depth; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + return scsi_change_queue_depth(sdev, qdepth); } /** - * _scsih_change_queue_depth - setting device queue depth - * @sdev: scsi device struct - * @qdepth: requested queue depth - * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP - * (see include/scsi/scsi_host.h for definition) - * - * Returns queue depth. - */ -static int -_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) -{ - if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) - _scsih_adjust_queue_depth(sdev, qdepth); - else if (reason == SCSI_QDEPTH_QFULL) - scsi_track_queue_full(sdev, qdepth); - else - return -EOPNOTSUPP; - - if (sdev->inquiry_len > 7) - sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \ - "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", - sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags, - sdev->ordered_tags, sdev->scsi_level, - (sdev->inquiry[7] & 2) >> 1); - - return sdev->queue_depth; -} - -/** - * _scsih_change_queue_type - changing device queue tag type - * @sdev: scsi device struct - * @tag_type: requested tag type - * - * Returns queue tag type. - */ -static int -_scsih_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; -} - - -/** * _scsih_target_alloc - target add routine * @starget: scsi target struct * @@ -1762,7 +1716,7 @@ _scsih_slave_configure(struct scsi_device *sdev) raid_device->num_pds, ds); - _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); + _scsih_change_queue_depth(sdev, qdepth); /* raid transport support */ _scsih_set_level(sdev, raid_device->volume_type); @@ -1828,7 +1782,7 @@ _scsih_slave_configure(struct scsi_device *sdev) _scsih_display_sata_capabilities(ioc, handle, sdev); - _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); + _scsih_change_queue_depth(sdev, qdepth); if (ssp_target) { sas_read_port_mode_page(sdev); @@ -3586,16 +3540,7 @@ _scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; /* set tags */ - if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) { - if (scmd->device->tagged_supported) { - if (scmd->device->ordered_tags) - mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; - else - mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; - } else - mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; - } else - mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && scmd->cmd_len != 32) @@ -7284,7 +7229,7 @@ static struct scsi_host_template scsih_driver_template = { .scan_finished = _scsih_scan_finished, .scan_start = _scsih_scan_start, .change_queue_depth = _scsih_change_queue_depth, - .change_queue_type = _scsih_change_queue_type, + .change_queue_type = scsi_change_queue_type, .eh_abort_handler = _scsih_abort, .eh_device_reset_handler = _scsih_dev_reset, .eh_target_reset_handler = _scsih_target_reset, @@ -7298,6 +7243,7 @@ static struct scsi_host_template 
scsih_driver_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mpt3sas_host_attrs, .sdev_attrs = mpt3sas_dev_attrs, + .track_queue_depth = 1, }; /** diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index eacee48a955c..f15df3de6790 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -26,18 +26,9 @@ #include "mv_sas.h" -static int lldd_max_execute_num = 1; -module_param_named(collector, lldd_max_execute_num, int, S_IRUGO); -MODULE_PARM_DESC(collector, "\n" - "\tIf greater than one, tells the SAS Layer to run in Task Collector\n" - "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n" - "\tThe mvsas SAS LLDD supports both modes.\n" - "\tDefault: 1 (Direct Mode).\n"); - int interrupt_coalescing = 0x80; static struct scsi_transport_template *mvs_stt; -struct kmem_cache *mvs_task_list_cache; static const struct mvs_chip_info mvs_chips[] = { [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, @@ -76,6 +67,8 @@ static struct scsi_host_template mvs_sht = { .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = mvst_host_attrs, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static struct sas_domain_function_template mvs_transport_ops = { @@ -511,14 +504,11 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost, sha->num_phys = nr_core * chip_info->n_phy; - sha->lldd_max_execute_num = lldd_max_execute_num; - if (mvi->flags & MVF_FLAG_SOC) can_queue = MVS_SOC_CAN_QUEUE; else can_queue = MVS_CHIP_SLOT_SZ; - sha->lldd_queue_size = can_queue; shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); shost->can_queue = can_queue; mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; @@ -831,16 +821,7 @@ static int __init mvs_init(void) if (!mvs_stt) return -ENOMEM; - mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (!mvs_task_list_cache) { - rc = -ENOMEM; - mv_printk("%s: mvs_task_list_cache alloc failed! 
\n", __func__); - goto err_out; - } - rc = pci_register_driver(&mvs_pci_driver); - if (rc) goto err_out; @@ -855,7 +836,6 @@ static void __exit mvs_exit(void) { pci_unregister_driver(&mvs_pci_driver); sas_release_transport(mvs_stt); - kmem_cache_destroy(mvs_task_list_cache); } struct device_attribute *mvst_host_attrs[] = { diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index ac52f7c99513..85d86a5cdb60 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -852,43 +852,7 @@ prep_out: return rc; } -static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags) -{ - struct mvs_task_list *first = NULL; - - for (; *num > 0; --*num) { - struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags); - - if (!mvs_list) - break; - - INIT_LIST_HEAD(&mvs_list->list); - if (!first) - first = mvs_list; - else - list_add_tail(&mvs_list->list, &first->list); - - } - - return first; -} - -static inline void mvs_task_free_list(struct mvs_task_list *mvs_list) -{ - LIST_HEAD(list); - struct list_head *pos, *a; - struct mvs_task_list *mlist = NULL; - - __list_add(&list, mvs_list->list.prev, &mvs_list->list); - - list_for_each_safe(pos, a, &list) { - list_del_init(pos); - mlist = list_entry(pos, struct mvs_task_list, list); - kmem_cache_free(mvs_task_list_cache, mlist); - } -} - -static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, +static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags, struct completion *completion, int is_tmf, struct mvs_tmf_task *tmf) { @@ -912,74 +876,9 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, return rc; } -static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, - struct completion *completion, int is_tmf, - struct mvs_tmf_task *tmf) +int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) { - struct domain_device *dev = task->dev; - struct mvs_prv_info *mpi = dev->port->ha->lldd_ha; - struct mvs_info *mvi = NULL; - struct sas_task *t = task; - struct mvs_task_list *mvs_list = NULL, *a; - LIST_HEAD(q); - int pass[2] = {0}; - u32 rc = 0; - u32 n = num; - unsigned long flags = 0; - - mvs_list = mvs_task_alloc_list(&n, gfp_flags); - if (n) { - printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__); - rc = -ENOMEM; - goto free_list; - } - - __list_add(&q, mvs_list->list.prev, &mvs_list->list); - - list_for_each_entry(a, &q, list) { - a->task = t; - t = list_entry(t->list.next, struct sas_task, list); - } - - list_for_each_entry(a, &q , list) { - - t = a->task; - mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info; - - spin_lock_irqsave(&mvi->lock, flags); - rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]); - if (rc) - dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); - spin_unlock_irqrestore(&mvi->lock, flags); - } - - if (likely(pass[0])) - MVS_CHIP_DISP->start_delivery(mpi->mvi[0], - (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - - if (likely(pass[1])) - MVS_CHIP_DISP->start_delivery(mpi->mvi[1], - (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - - list_del_init(&q); - -free_list: - if (mvs_list) - mvs_task_free_list(mvs_list); - - return rc; -} - -int mvs_queue_command(struct sas_task *task, const int num, - gfp_t gfp_flags) -{ - struct mvs_device *mvi_dev = task->dev->lldd_dev; - struct sas_ha_struct *sas = mvi_dev->mvi_info->sas; - - if (sas->lldd_max_execute_num < 2) - return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); - else - return 
mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL); + return mvs_task_exec(task, gfp_flags, NULL, 0, NULL); } static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) @@ -1411,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; add_timer(&task->slow_task->timer); - res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); + res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf); if (res) { del_timer(&task->slow_task->timer); diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index d6b19dc80bee..dc409c04747a 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -65,7 +65,6 @@ extern struct mvs_tgt_initiator mvs_tgt; extern struct mvs_info *tgt_mvi; extern const struct mvs_dispatch mvs_64xx_dispatch; extern const struct mvs_dispatch mvs_94xx_dispatch; -extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) @@ -440,12 +439,6 @@ struct mvs_task_exec_info { int n_elem; }; -struct mvs_task_list { - struct sas_task *task; - struct list_head list; -}; - - /******************** function prototype *********************/ void mvs_get_sas_addr(void *buf, u32 buflen); void mvs_tag_clear(struct mvs_info *mvi, u32 tag); @@ -462,8 +455,7 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo, u32 off_hi, u64 sas_addr); void mvs_scan_start(struct Scsi_Host *shost); int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); -int mvs_queue_command(struct sas_task *task, const int num, - gfp_t gfp_flags); +int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags); int mvs_abort_task(struct sas_task *task); int mvs_abort_task_set(struct domain_device *dev, u8 *lun); int mvs_clear_aca(struct domain_device *dev, u8 *lun); diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index a7305ffc359d..5b93ed810f6e 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -7997,10 +7997,7 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device) if (depth_to_use > MAX_TAGS) depth_to_use = MAX_TAGS; - scsi_adjust_queue_depth(device, - (device->tagged_supported ? 
- MSG_SIMPLE_TAG : 0), - depth_to_use); + scsi_change_queue_depth(device, depth_to_use); /* ** Since the queue depth is not tunable under Linux, diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 92cdd4b06526..243eab3d10d0 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -540,9 +540,9 @@ static int osd_remove(struct device *dev) */ static struct scsi_driver osd_driver = { - .owner = THIS_MODULE, .gendrv = { .name = osd_name, + .owner = THIS_MODULE, .probe = osd_probe, .remove = osd_remove, } diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index dff37a250d79..5033223f6287 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -172,9 +172,9 @@ static int osst_probe(struct device *); static int osst_remove(struct device *); static struct scsi_driver osst_template = { - .owner = THIS_MODULE, .gendrv = { .name = "osst", + .owner = THIS_MODULE, .probe = osst_probe, .remove = osst_remove, } @@ -259,9 +259,10 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt) SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); if (scode) printk(OSST_DEB_MSG "%s:D: Sense: %02x, ASC: %02x, ASCQ: %02x\n", - name, scode, sense[12], sense[13]); + name, scode, sense[12], sense[13]); if (cmdstatp->have_sense) - __scsi_print_sense("osst ", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); + __scsi_print_sense(STp->device, name, + SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } else #endif @@ -275,7 +276,8 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt) SRpnt->cmd[0] != TEST_UNIT_READY)) { /* Abnormal conditions for tape */ if (cmdstatp->have_sense) { printk(KERN_WARNING "%s:W: Command with sense data:\n", name); - __scsi_print_sense("osst ", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); + __scsi_print_sense(STp->device, name, + SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } else { static int notyetprinted = 1; @@ -3325,19 +3327,18 @@ static int osst_write_frame(struct osst_tape * STp, struct osst_request ** aSRpn /* Lock or unlock the drive door. Don't use when struct osst_request allocated. */ static int do_door_lock(struct osst_tape * STp, int do_lock) { - int retval, cmd; + int retval; - cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK; #if DEBUG printk(OSST_DEB_MSG "%s:D: %socking drive door.\n", tape_name(STp), do_lock ? "L" : "Unl"); #endif - retval = scsi_ioctl(STp->device, cmd, NULL); - if (!retval) { + + retval = scsi_set_medium_removal(STp->device, + do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW); + if (!retval) STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; - } - else { + else STp->door_locked = ST_LOCK_FAILS; - } return retval; } @@ -4967,10 +4968,10 @@ static long osst_ioctl(struct file * file, * may try and take the device offline, in which case all further * access to the device is prohibited. 
*/ - if( !scsi_block_when_processing_errors(STp->device) ) { - retval = (-ENXIO); + retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in, + file->f_flags & O_NDELAY); + if (retval) goto out; - } cmd_type = _IOC_TYPE(cmd_in); cmd_nr = _IOC_NR(cmd_in); diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index 80bacb5dc1d4..e81eadd08afc 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -1,6 +1,4 @@ -#define AUTOSENSE #define PSEUDO_DMA -#define FOO #define UNSAFE /* Not unsafe for PAS16 -- use it */ #define PDEBUG 0 @@ -24,47 +22,9 @@ * Media Vision * (510) 770-8600 * (800) 348-7116 - * - * and - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ /* - * Options : - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. - * - * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512 - * bytes at a time. Since interrupts are disabled by default during - * these transfers, we might need this to give reasonable interrupt - * service time if the transfer size gets too large. - * - * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance - * increase compared to polled I/O. - * - * PARITY - enable parity checking. Not supported. - * - * SCSI2 - enable support for SCSI-II tagged queueing. Untested. - * - * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This - * parameter comes from the NCR5380 code. It is NOT unsafe with - * the PAS16 and you should use it. If you don't you will have - * a problem with dropped characters during high speed - * communications during SCSI transfers. If you really don't - * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or - * twiddle with the transfer size in the high level code. - * - * USLEEP - enable support for devices that don't disconnect. Untested. - * * The card is detected and initialized in one of several ways : * 1. Autoprobe (default) - There are many different models of * the Pro Audio Spectrum/Studio 16, and I only have one of @@ -102,13 +62,11 @@ * If you have problems with your card not being recognized, use * the LILO command line override. Try to get it recognized without * interrupts. Ie, for a board at the default 0x388 base port, - * boot: linux pas16=0x388,255 + * boot: linux pas16=0x388,0 * - * SCSI_IRQ_NONE (255) should be specified for no interrupt, + * NO_IRQ (0) should be specified for no interrupt, * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden * on the command line. 
- * - * (IRQ_AUTO == 254, SCSI_IRQ_NONE == 255 in NCR5380.h) */ #include <linux/module.h> @@ -123,15 +81,12 @@ #include <linux/stat.h> #include <linux/init.h> -#include "scsi.h" #include <scsi/scsi_host.h> #include "pas16.h" #define AUTOPROBE_IRQ #include "NCR5380.h" -static int pas_maxi = 0; -static int pas_wmaxi = 0; static unsigned short pas16_addr = 0; static int pas16_irq = 0; @@ -337,6 +292,7 @@ static int __init } +#ifndef MODULE /* * Function : pas16_setup(char *str, int *ints) * @@ -347,10 +303,13 @@ static int __init * */ -void __init pas16_setup(char *str, int *ints) +static int __init pas16_setup(char *str) { static int commandline_current = 0; int i; + int ints[10]; + + get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] != 2) printk("pas16_setup : usage pas16=io_port,irq\n"); else @@ -364,8 +323,12 @@ void __init pas16_setup(char *str, int *ints) } ++commandline_current; } + return 1; } +__setup("pas16=", pas16_setup); +#endif + /* * Function : int pas16_detect(struct scsi_host_template * tpnt) * @@ -379,7 +342,7 @@ void __init pas16_setup(char *str, int *ints) * */ -int __init pas16_detect(struct scsi_host_template * tpnt) +static int __init pas16_detect(struct scsi_host_template *tpnt) { static int current_override = 0; static unsigned short current_base = 0; @@ -387,10 +350,6 @@ int __init pas16_detect(struct scsi_host_template * tpnt) unsigned short io_port; int count; - tpnt->proc_name = "pas16"; - tpnt->show_info = pas16_show_info; - tpnt->write_info = pas16_write_info; - if (pas16_addr != 0) { overrides[0].io_port = pas16_addr; /* @@ -452,15 +411,19 @@ int __init pas16_detect(struct scsi_host_template * tpnt) else instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); - if (instance->irq != SCSI_IRQ_NONE) + /* Compatibility with documented NCR5380 kernel parameters */ + if (instance->irq == 255) + instance->irq = NO_IRQ; + + if (instance->irq != NO_IRQ) if (request_irq(instance->irq, pas16_intr, 0, "pas16", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; + instance->irq = NO_IRQ; } - if (instance->irq == SCSI_IRQ_NONE) { + if (instance->irq == NO_IRQ) { printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no); printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); /* Disable 5380 interrupts, leave drive params the same */ @@ -472,17 +435,6 @@ int __init pas16_detect(struct scsi_host_template * tpnt) printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); #endif - printk("scsi%d : at 0x%04x", instance->host_no, (int) - instance->io_port); - if (instance->irq == SCSI_IRQ_NONE) - printk (" interrupts disabled"); - else - printk (" irq %d", instance->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE); - NCR5380_print_options(instance); - printk("\n"); - ++current_override; ++count; } @@ -509,8 +461,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt) * and matching the H_C_S coordinates to what DOS uses. 
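 * (A worked example of that mapping, assuming the conventional 64-head,
 * 32-sector translation this driver reports: each cylinder then covers
 * 64 * 32 = 2048 sectors, so a disk of 2097152 512-byte sectors (1 GiB)
 * comes back as C/H/S = 1024/64/32, i.e. cylinders = capacity >> 11.)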
*/ -int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, - sector_t capacity, int * ip) +static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, + sector_t capacity, int *ip) { int size = capacity; ip[0] = 64; @@ -547,6 +499,7 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, P_DATA_REG_OFFSET); register int i = len; int ii = 0; + struct NCR5380_hostdata *hostdata = shost_priv(instance); while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) ++ii; @@ -559,8 +512,8 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, instance->host_no); return -1; } - if (ii > pas_maxi) - pas_maxi = ii; + if (ii > hostdata->spin_max_r) + hostdata->spin_max_r = ii; return 0; } @@ -583,6 +536,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; + struct NCR5380_hostdata *hostdata = shost_priv(instance); while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) ++ii; @@ -595,8 +549,8 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src instance->host_no); return -1; } - if (ii > pas_maxi) - pas_wmaxi = ii; + if (ii > hostdata->spin_max_w) + hostdata->spin_max_w = ii; return 0; } @@ -604,7 +558,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src static int pas16_release(struct Scsi_Host *shost) { - if (shost->irq) + if (shost->irq != NO_IRQ) free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) @@ -617,6 +571,10 @@ static struct scsi_host_template driver_template = { .name = "Pro Audio Spectrum-16 SCSI", .detect = pas16_detect, .release = pas16_release, + .proc_name = "pas16", + .show_info = pas16_show_info, + .write_info = pas16_write_info, + .info = pas16_info, .queuecommand = pas16_queue_command, .eh_abort_handler = pas16_abort, .eh_bus_reset_handler = pas16_bus_reset, diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index aa528f53c533..c6109c80050b 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h @@ -18,26 +18,12 @@ * Media Vision * (510) 770-8600 * (800) 348-7116 - * - * and - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ #ifndef PAS16_H #define PAS16_H -#define PAS16_PUBLIC_RELEASE 3 - #define PDEBUG_INIT 0x1 #define PDEBUG_TRANSFER 0x2 @@ -114,12 +100,6 @@ #ifndef ASM -static int pas16_abort(Scsi_Cmnd *); -static int pas16_biosparam(struct scsi_device *, struct block_device *, - sector_t, int*); -static int pas16_detect(struct scsi_host_template *); -static int pas16_queue_command(struct Scsi_Host *, struct scsi_cmnd *); -static int pas16_bus_reset(Scsi_Cmnd *); #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 @@ -161,6 +141,7 @@ static int pas16_bus_reset(Scsi_Cmnd *); #define NCR5380_queue_command pas16_queue_command #define NCR5380_abort pas16_abort #define NCR5380_bus_reset pas16_bus_reset +#define NCR5380_info pas16_info #define NCR5380_show_info pas16_show_info #define NCR5380_write_info pas16_write_info diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 666bf5af06e2..329aba0083ab 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -89,6 +89,8 @@ static struct scsi_host_template pm8001_sht = { 
.target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = pm8001_host_attrs, + .use_blk_tags = 1, + .track_queue_depth = 1, }; /** @@ -599,8 +601,6 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, sha->lldd_module = THIS_MODULE; sha->sas_addr = &pm8001_ha->sas_addr[0]; sha->num_phys = chip_info->n_phy; - sha->lldd_max_execute_num = 1; - sha->lldd_queue_size = PM8001_CAN_QUEUE; sha->core.shost = shost; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 76570e6a547d..b93f289b42b3 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -350,7 +350,7 @@ static int sas_find_local_port_id(struct domain_device *dev) */ #define DEV_IS_GONE(pm8001_dev) \ ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) -static int pm8001_task_exec(struct sas_task *task, const int num, +static int pm8001_task_exec(struct sas_task *task, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { struct domain_device *dev = task->dev; @@ -360,7 +360,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num, struct sas_task *t = task; struct pm8001_ccb_info *ccb; u32 tag = 0xdeadbeef, rc, n_elem = 0; - u32 n = num; unsigned long flags = 0; if (!dev->port) { @@ -387,18 +386,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num, spin_unlock_irqrestore(&pm8001_ha->lock, flags); t->task_done(t); spin_lock_irqsave(&pm8001_ha->lock, flags); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); continue; } else { struct task_status_struct *ts = &t->task_status; ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; t->task_done(t); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); continue; } } @@ -460,9 +453,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, t->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock(&t->task_state_lock); pm8001_dev->running_req++; - if (n > 1) - t = list_entry(t->list.next, struct sas_task, list); - } while (--n); + } while (0); rc = 0; goto out_done; @@ -483,14 +474,11 @@ out_done: * pm8001_queue_command - register for upper layer used, all IO commands sent * to HBA are from this interface. * @task: the task to be execute. - * @num: if can_queue great than 1, the task can be queued up. 
for SMP task, - * we always execute one one time * @gfp_flags: gfp_flags */ -int pm8001_queue_command(struct sas_task *task, const int num, - gfp_t gfp_flags) +int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags) { - return pm8001_task_exec(task, num, gfp_flags, 0, NULL); + return pm8001_task_exec(task, gfp_flags, 0, NULL); } /** @@ -708,7 +696,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; add_timer(&task->slow_task->timer); - res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); + res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf); if (res) { del_timer(&task->slow_task->timer); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index f6b2ac59dae4..8dd8b7840f04 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -623,8 +623,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata); void pm8001_scan_start(struct Scsi_Host *shost); int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); -int pm8001_queue_command(struct sas_task *task, const int num, - gfp_t gfp_flags); +int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags); int pm8001_abort_task(struct sas_task *task); int pm8001_abort_task_set(struct domain_device *dev, u8 *lun); int pm8001_clear_aca(struct domain_device *dev, u8 *lun); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index bcb64eb1387f..b1b1f66b1ab7 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -249,15 +249,11 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev) PMCRAID_VSET_MAX_SECTORS); } - if (scsi_dev->tagged_supported && - (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) { - scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); - scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG, - scsi_dev->host->cmd_per_lun); - } else { - scsi_adjust_queue_depth(scsi_dev, 0, - scsi_dev->host->cmd_per_lun); - } + /* + * We never want to report TCQ support for these types of devices. 
+ */ + if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry)) + scsi_dev->tagged_supported = 0; return 0; } @@ -289,55 +285,18 @@ static void pmcraid_slave_destroy(struct scsi_device *scsi_dev) * pmcraid_change_queue_depth - Change the device's queue depth * @scsi_dev: scsi device struct * @depth: depth to set - * @reason: calling context * * Return value * actual depth set */ -static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth, - int reason) +static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth) { - if (reason != SCSI_QDEPTH_DEFAULT) - return -EOPNOTSUPP; - if (depth > PMCRAID_MAX_CMD_PER_LUN) depth = PMCRAID_MAX_CMD_PER_LUN; - - scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth); - - return scsi_dev->queue_depth; + return scsi_change_queue_depth(scsi_dev, depth); } /** - * pmcraid_change_queue_type - Change the device's queue type - * @scsi_dev: scsi device struct - * @tag: type of tags to use - * - * Return value: - * actual queue type set - */ -static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag) -{ - struct pmcraid_resource_entry *res; - - res = (struct pmcraid_resource_entry *)scsi_dev->hostdata; - - if ((res) && scsi_dev->tagged_supported && - (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) { - scsi_set_tag_type(scsi_dev, tag); - - if (tag) - scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); - else - scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth); - } else - tag = 0; - - return tag; -} - - -/** * pmcraid_init_cmdblk - initializes a command block * * @cmd: pointer to struct pmcraid_cmd to be initialized @@ -3175,36 +3134,6 @@ static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd) } /** - * pmcraid_task_attributes - Translate SPI Q-Tags to task attributes - * @scsi_cmd: scsi command struct - * - * Return value - * number of tags or 0 if the task is not tagged - */ -static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd) -{ - char tag[2]; - u8 rc = 0; - - if (scsi_populate_tag_msg(scsi_cmd, tag)) { - switch (tag[0]) { - case MSG_SIMPLE_TAG: - rc = TASK_TAG_SIMPLE; - break; - case MSG_HEAD_TAG: - rc = TASK_TAG_QUEUE_HEAD; - break; - case MSG_ORDERED_TAG: - rc = TASK_TAG_ORDERED; - break; - }; - } - - return rc; -} - - -/** * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB * @cmd: pmcraid command struct * @sgcount: count of scatter-gather elements @@ -3559,7 +3488,9 @@ static int pmcraid_queuecommand_lck( } ioarcb->request_flags0 |= NO_LINK_DESCS; - ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd); + + if (scsi_cmd->flags & SCMD_TAGGED) + ioarcb->request_flags1 |= TASK_TAG_SIMPLE; if (RES_IS_GSCSI(res->cfg_entry)) ioarcb->request_flags1 |= DELAY_AFTER_RESET; @@ -4320,7 +4251,7 @@ static struct scsi_host_template pmcraid_host_template = { .slave_configure = pmcraid_slave_configure, .slave_destroy = pmcraid_slave_destroy, .change_queue_depth = pmcraid_change_queue_depth, - .change_queue_type = pmcraid_change_queue_type, + .change_queue_type = scsi_change_queue_type, .can_queue = PMCRAID_MAX_IO_CMD, .this_id = -1, .sg_tablesize = PMCRAID_MAX_IOADLS, @@ -4329,7 +4260,8 @@ static struct scsi_host_template pmcraid_host_template = { .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = pmcraid_host_attrs, - .proc_name = PMCRAID_DRIVER_NAME + .proc_name = PMCRAID_DRIVER_NAME, + .use_blk_tags = 1, }; /* diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 
ef23fabe3924..b3b48b5a984c 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c @@ -220,10 +220,6 @@ static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd, unsigned char opcode; int res; -#ifdef DEBUG - scsi_print_command(cmd); -#endif - priv->curr_cmd = cmd; cmd->scsi_done = done; diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 158020522dfb..c68a66e8cfc1 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -1224,10 +1224,9 @@ qla1280_slave_configure(struct scsi_device *device) if (device->tagged_supported && (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) { - scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, - ha->bus_settings[bus].hiwat); + scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat); } else { - scsi_adjust_queue_depth(device, 0, default_depth); + scsi_change_queue_depth(device, default_depth); } nv->bus[bus].target[target].parameter.enable_sync = device->sdtr; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index b1865a72ce59..7686bfe9a4a9 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -752,8 +752,6 @@ extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha); extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg); extern void qla8044_wr_direct(struct scsi_qla_host *vha, const uint32_t crb_reg, const uint32_t value); -extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha); -extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha); extern int qla8044_device_state_handler(struct scsi_qla_host *vha); extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha); extern void qla8044_clear_drv_active(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index f0edb07f3198..a1ab25fca874 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -325,7 +325,6 @@ qla2x00_start_scsi(srb_t *sp) struct qla_hw_data *ha; struct req_que *req; struct rsp_que *rsp; - char tag[2]; /* Setup device pointers. */ ret = 0; @@ -404,26 +403,7 @@ qla2x00_start_scsi(srb_t *sp) /* Set target ID and LUN number*/ SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); cmd_pkt->lun = cpu_to_le16(cmd->device->lun); - - /* Update tagged queuing modifier */ - if (scsi_populate_tag_msg(cmd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - cmd_pkt->control_flags = - __constant_cpu_to_le16(CF_HEAD_TAG); - break; - case ORDERED_QUEUE_TAG: - cmd_pkt->control_flags = - __constant_cpu_to_le16(CF_ORDERED_TAG); - break; - default: - cmd_pkt->control_flags = - __constant_cpu_to_le16(CF_SIMPLE_TAG); - break; - } - } else { - cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); - } + cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); /* Load SCSI command packet. 
*/ memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); @@ -1264,7 +1244,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, uint16_t fcp_cmnd_len; struct fcp_cmnd *fcp_cmnd; dma_addr_t crc_ctx_dma; - char tag[2]; cmd = GET_CMD_SP(sp); @@ -1356,25 +1335,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); fcp_cmnd->task_management = 0; - - /* - * Update tagged queuing modifier if using command tag queuing - */ - if (scsi_populate_tag_msg(cmd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE; - break; - case ORDERED_QUEUE_TAG: - fcp_cmnd->task_attribute = TSK_ORDERED; - break; - default: - fcp_cmnd->task_attribute = TSK_SIMPLE; - break; - } - } else { - fcp_cmnd->task_attribute = TSK_SIMPLE; - } + fcp_cmnd->task_attribute = TSK_SIMPLE; cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ @@ -1495,7 +1456,6 @@ qla24xx_start_scsi(srb_t *sp) struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; - char tag[2]; /* Setup device pointers. */ ret = 0; @@ -1578,22 +1538,7 @@ qla24xx_start_scsi(srb_t *sp) int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); - /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ - if (scsi_populate_tag_msg(cmd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - cmd_pkt->task = TSK_HEAD_OF_QUEUE; - break; - case ORDERED_QUEUE_TAG: - cmd_pkt->task = TSK_ORDERED; - break; - default: - cmd_pkt->task = TSK_SIMPLE; - break; - } - } else { - cmd_pkt->task = TSK_SIMPLE; - } + cmd_pkt->task = TSK_SIMPLE; /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); @@ -2310,7 +2255,6 @@ qla82xx_start_scsi(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; struct rsp_que *rsp = NULL; - char tag[2]; /* Setup device pointers. */ ret = 0; @@ -2489,22 +2433,6 @@ sufficient_dsds: else if (cmd->sc_data_direction == DMA_FROM_DEVICE) ctx->fcp_cmnd->additional_cdb_len |= 2; - /* - * Update tagged queuing modifier -- default is TSK_SIMPLE (0). - */ - if (scsi_populate_tag_msg(cmd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - ctx->fcp_cmnd->task_attribute = - TSK_HEAD_OF_QUEUE; - break; - case ORDERED_QUEUE_TAG: - ctx->fcp_cmnd->task_attribute = - TSK_ORDERED; - break; - } - } - /* Populate the FCP_PRIO. */ if (ha->flags.fcp_prio_enabled) ctx->fcp_cmnd->task_attribute |= @@ -2565,20 +2493,6 @@ sufficient_dsds: host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); - /* - * Update tagged queuing modifier -- default is TSK_SIMPLE (0). - */ - if (scsi_populate_tag_msg(cmd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - cmd_pkt->task = TSK_HEAD_OF_QUEUE; - break; - case ORDERED_QUEUE_TAG: - cmd_pkt->task = TSK_ORDERED; - break; - } - } - /* Populate the FCP_PRIO. */ if (ha->flags.fcp_prio_enabled) cmd_pkt->task |= sp->fcport->fcp_prio << 3; diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 80867599527d..6d190b4b82a0 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -3086,7 +3086,6 @@ qlafx00_start_scsi(srb_t *sp) struct cmd_type_7_fx00 *cmd_pkt; struct cmd_type_7_fx00 lcmd_pkt; struct scsi_lun llun; - char tag[2]; /* Setup device pointers. 
*/ ret = 0; @@ -3157,18 +3156,6 @@ qlafx00_start_scsi(srb_t *sp) host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun, sizeof(lcmd_pkt.lun)); - /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ - if (scsi_populate_tag_msg(cmd, tag)) { - switch (tag[0]) { - case HEAD_OF_QUEUE_TAG: - lcmd_pkt.task = TSK_HEAD_OF_QUEUE; - break; - case ORDERED_QUEUE_TAG: - lcmd_pkt.task = TSK_ORDERED; - break; - } - } - /* Load SCSI command packet. */ host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb)); lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 24a852828b5d..ed4d6b6b53e3 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -238,7 +238,7 @@ qla8044_rmw_crb_reg(struct scsi_qla_host *vha, return; } -inline void +static inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha) { uint32_t qsnt_state; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index db3dbd999cb6..6b4d9235368a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -236,8 +236,6 @@ static int qla2xxx_eh_target_reset(struct scsi_cmnd *); static int qla2xxx_eh_bus_reset(struct scsi_cmnd *); static int qla2xxx_eh_host_reset(struct scsi_cmnd *); -static int qla2x00_change_queue_depth(struct scsi_device *, int, int); -static int qla2x00_change_queue_type(struct scsi_device *, int); static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); static void qla83xx_disable_laser(scsi_qla_host_t *vha); @@ -259,8 +257,8 @@ struct scsi_host_template qla2xxx_driver_template = { .slave_destroy = qla2xxx_slave_destroy, .scan_finished = qla2xxx_scan_finished, .scan_start = qla2xxx_scan_start, - .change_queue_depth = qla2x00_change_queue_depth, - .change_queue_type = qla2x00_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, .this_id = -1, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, @@ -270,6 +268,8 @@ struct scsi_host_template qla2xxx_driver_template = { .shost_attrs = qla2x00_host_attrs, .supported_mode = MODE_INITIATOR, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static struct scsi_transport_template *qla2xxx_transport_template = NULL; @@ -1405,10 +1405,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev) if (IS_T10_PI_CAPABLE(vha->hw)) blk_queue_update_dma_alignment(sdev->request_queue, 0x7); - if (sdev->tagged_supported) - scsi_activate_tcq(sdev, req->max_q_depth); - else - scsi_deactivate_tcq(sdev, req->max_q_depth); + scsi_change_queue_depth(sdev, req->max_q_depth); return 0; } @@ -1418,76 +1415,6 @@ qla2xxx_slave_destroy(struct scsi_device *sdev) sdev->hostdata = NULL; } -static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth) -{ - fc_port_t *fcport = (struct fc_port *) sdev->hostdata; - - if (!scsi_track_queue_full(sdev, qdepth)) - return; - - ql_dbg(ql_dbg_io, fcport->vha, 0x3029, - "Queue depth adjusted-down to %d for nexus=%ld:%d:%llu.\n", - sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); -} - -static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth) -{ - fc_port_t *fcport = sdev->hostdata; - struct scsi_qla_host *vha = fcport->vha; - struct req_que *req = NULL; - - req = vha->req; - if (!req) - return; - - if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth) - return; - - if (sdev->ordered_tags) - 
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth); - else - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth); - - ql_dbg(ql_dbg_io, vha, 0x302a, - "Queue depth adjusted-up to %d for nexus=%ld:%d:%llu.\n", - sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun); -} - -static int -qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) -{ - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - break; - case SCSI_QDEPTH_QFULL: - qla2x00_handle_queue_full(sdev, qdepth); - break; - case SCSI_QDEPTH_RAMP_UP: - qla2x00_adjust_sdev_qdepth_up(sdev, qdepth); - break; - default: - return -EOPNOTSUPP; - } - - return sdev->queue_depth; -} - -static int -qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag_type); - if (tag_type) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag_type = 0; - - return tag_type; -} - /** * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. * @ha: HA context diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 08ab6dac226d..17222eb49762 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -280,7 +280,6 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) uint16_t req_cnt; unsigned long flags; uint32_t index; - char tag[2]; /* Get real lun and adapter */ ddb_entry = srb->ddb; @@ -352,15 +351,6 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) /* Set tagged queueing control flags */ cmd_entry->control_flags |= CF_SIMPLE_TAG; - if (scsi_populate_tag_msg(cmd, tag)) - switch (tag[0]) { - case MSG_HEAD_TAG: - cmd_entry->control_flags |= CF_HEAD_TAG; - break; - case MSG_ORDERED_TAG: - cmd_entry->control_flags |= CF_ORDERED_TAG; - break; - } qla4xxx_advance_req_ring_ptr(ha); qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 199fcf79a051..6d25879d87c8 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -162,12 +162,8 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); static int qla4xxx_slave_alloc(struct scsi_device *device); -static int qla4xxx_slave_configure(struct scsi_device *device); -static void qla4xxx_slave_destroy(struct scsi_device *sdev); static umode_t qla4_attr_is_visible(int param_type, int param); static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); -static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason); /* * iSCSI Flash DDB sysfs entry points @@ -204,10 +200,8 @@ static struct scsi_host_template qla4xxx_driver_template = { .eh_host_reset_handler = qla4xxx_eh_host_reset, .eh_timed_out = qla4xxx_eh_cmd_timed_out, - .slave_configure = qla4xxx_slave_configure, .slave_alloc = qla4xxx_slave_alloc, - .slave_destroy = qla4xxx_slave_destroy, - .change_queue_depth = qla4xxx_change_queue_depth, + .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, @@ -218,6 +212,7 @@ static struct scsi_host_template qla4xxx_driver_template = { .shost_attrs = qla4xxx_host_attrs, .host_reset = qla4xxx_host_reset, .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, + .use_blk_tags = 1, }; static 
struct iscsi_transport qla4xxx_iscsi_transport = { @@ -9060,35 +9055,14 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev) ddb = sess->dd_data; sdev->hostdata = ddb; - sdev->tagged_supported = 1; if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU) queue_depth = ql4xmaxqdepth; - scsi_activate_tcq(sdev, queue_depth); + scsi_change_queue_depth(sdev, queue_depth); return 0; } -static int qla4xxx_slave_configure(struct scsi_device *sdev) -{ - sdev->tagged_supported = 1; - return 0; -} - -static void qla4xxx_slave_destroy(struct scsi_device *sdev) -{ - scsi_deactivate_tcq(sdev, 1); -} - -static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, - int reason) -{ - if (!ql4xqfulltracking) - return -EOPNOTSUPP; - - return iscsi_change_queue_depth(sdev, qdepth, reason); -} - /** * qla4xxx_del_from_active_array - returns an active srb * @ha: Pointer to host adapter structure. @@ -9888,6 +9862,9 @@ static int __init qla4xxx_module_init(void) { int ret; + if (ql4xqfulltracking) + qla4xxx_driver_template.track_queue_depth = 1; + /* Allocate cache for SRBs. */ srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, SLAB_HWCACHE_ALIGN, NULL); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 79c77b485a67..1ad0c36375b8 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -527,9 +527,9 @@ void scsi_log_send(struct scsi_cmnd *cmd) * * 1: nothing (match completion) * - * 2: log opcode + command of all commands + * 2: log opcode + command of all commands + cmd address * - * 3: same as 2 plus dump cmd address + * 3: same as 2 * * 4: same as 3 plus dump extra junk */ @@ -537,10 +537,8 @@ void scsi_log_send(struct scsi_cmnd *cmd) level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, SCSI_LOG_MLQUEUE_BITS); if (level > 1) { - scmd_printk(KERN_INFO, cmd, "Send: "); - if (level > 2) - printk("0x%p ", cmd); - printk("\n"); + scmd_printk(KERN_INFO, cmd, + "Send: scmd 0x%p\n", cmd); scsi_print_command(cmd); if (level > 3) { printk(KERN_INFO "buffer = 0x%p, bufflen = %d," @@ -565,7 +563,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) * * 2: same as 1 but for all command completions. * - * 3: same as 2 plus dump cmd address + * 3: same as 2 * * 4: same as 3 plus dump extra junk */ @@ -574,39 +572,10 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) SCSI_LOG_MLCOMPLETE_BITS); if (((level > 0) && (cmd->result || disposition != SUCCESS)) || (level > 1)) { - scmd_printk(KERN_INFO, cmd, "Done: "); - if (level > 2) - printk("0x%p ", cmd); - /* - * Dump truncated values, so we usually fit within - * 80 chars. - */ - switch (disposition) { - case SUCCESS: - printk("SUCCESS\n"); - break; - case NEEDS_RETRY: - printk("RETRY\n"); - break; - case ADD_TO_MLQUEUE: - printk("MLQUEUE\n"); - break; - case FAILED: - printk("FAILED\n"); - break; - case TIMEOUT_ERROR: - /* - * If called via scsi_times_out. - */ - printk("TIMEOUT\n"); - break; - default: - printk("UNKNOWN\n"); - } - scsi_print_result(cmd); + scsi_print_result(cmd, "Done: ", disposition); scsi_print_command(cmd); if (status_byte(cmd->result) & CHECK_CONDITION) - scsi_print_sense("", cmd); + scsi_print_sense(cmd); if (level > 3) scmd_printk(KERN_INFO, cmd, "scsi host busy %d failed %d\n", @@ -634,87 +603,6 @@ void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) EXPORT_SYMBOL(scsi_cmd_get_serial); /** - * scsi_dispatch_command - Dispatch a command to the low-level driver. - * @cmd: command block we are dispatching. 
- * - * Return: nonzero return request was rejected and device's queue needs to be - * plugged. - */ -int scsi_dispatch_cmd(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *host = cmd->device->host; - int rtn = 0; - - atomic_inc(&cmd->device->iorequest_cnt); - - /* check if the device is still usable */ - if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { - /* in SDEV_DEL we error all commands. DID_NO_CONNECT - * returns an immediate error upwards, and signals - * that the device is no longer present */ - cmd->result = DID_NO_CONNECT << 16; - goto done; - } - - /* Check to see if the scsi lld made this device blocked. */ - if (unlikely(scsi_device_blocked(cmd->device))) { - /* - * in blocked state, the command is just put back on - * the device queue. The suspend state has already - * blocked the queue so future requests should not - * occur until the device transitions out of the - * suspend state. - */ - SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, - "queuecommand : device blocked\n")); - return SCSI_MLQUEUE_DEVICE_BUSY; - } - - /* Store the LUN value in cmnd, if needed. */ - if (cmd->device->lun_in_cdb) - cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | - (cmd->device->lun << 5 & 0xe0); - - scsi_log_send(cmd); - - /* - * Before we queue this command, check if the command - * length exceeds what the host adapter can handle. - */ - if (cmd->cmd_len > cmd->device->host->max_cmd_len) { - SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, - "queuecommand : command too long. " - "cdb_size=%d host->max_cmd_len=%d\n", - cmd->cmd_len, cmd->device->host->max_cmd_len)); - cmd->result = (DID_ABORT << 16); - goto done; - } - - if (unlikely(host->shost_state == SHOST_DEL)) { - cmd->result = (DID_NO_CONNECT << 16); - goto done; - - } - - trace_scsi_dispatch_cmd_start(cmd); - rtn = host->hostt->queuecommand(host, cmd); - if (rtn) { - trace_scsi_dispatch_cmd_error(cmd, rtn); - if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && - rtn != SCSI_MLQUEUE_TARGET_BUSY) - rtn = SCSI_MLQUEUE_HOST_BUSY; - - SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, - "queuecommand : request rejected\n")); - } - - return rtn; - done: - cmd->scsi_done(cmd); - return 0; -} - -/** * scsi_finish_command - cleanup and pass command back to upper layer * @cmd: the command * @@ -773,32 +661,18 @@ void scsi_finish_command(struct scsi_cmnd *cmd) } /** - * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth + * scsi_change_queue_depth - change a device's queue depth * @sdev: SCSI Device in question - * @tagged: Do we use tagged queueing (non-0) or do we treat - * this device as an untagged device (0) - * @tags: Number of tags allowed if tagged queueing enabled, - * or number of commands the low level driver can - * queue up in non-tagged mode (as per cmd_per_lun). - * - * Returns: Nothing + * @depth: number of commands allowed to be queued to the driver * - * Lock Status: None held on entry - * - * Notes: Low level drivers may call this at any time and we will do - * the right thing depending on whether or not the device is - * currently active and whether or not it even has the - * command blocks built yet. + * Sets the device queue depth and returns the new value. 
*/ -void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) +int scsi_change_queue_depth(struct scsi_device *sdev, int depth) { unsigned long flags; - /* - * refuse to set tagged depth to an unworkable size - */ - if (tags <= 0) - return; + if (depth <= 0) + goto out; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); @@ -813,35 +687,17 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) */ if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) { if (blk_queue_tagged(sdev->request_queue) && - blk_queue_resize_tags(sdev->request_queue, tags) != 0) - goto out; + blk_queue_resize_tags(sdev->request_queue, depth) != 0) + goto out_unlock; } - sdev->queue_depth = tags; - switch (tagged) { - case 0: - sdev->ordered_tags = 0; - sdev->simple_tags = 0; - break; - case MSG_ORDERED_TAG: - sdev->ordered_tags = 1; - sdev->simple_tags = 1; - break; - case MSG_SIMPLE_TAG: - sdev->ordered_tags = 0; - sdev->simple_tags = 1; - break; - default: - sdev->ordered_tags = 0; - sdev->simple_tags = 0; - sdev_printk(KERN_WARNING, sdev, - "scsi_adjust_queue_depth, bad queue type, " - "disabled\n"); - } - out: + sdev->queue_depth = depth; +out_unlock: spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); +out: + return sdev->queue_depth; } -EXPORT_SYMBOL(scsi_adjust_queue_depth); +EXPORT_SYMBOL(scsi_change_queue_depth); /** * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth @@ -885,19 +741,32 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth) return 0; if (sdev->last_queue_full_depth < 8) { /* Drop back to untagged */ - scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); + scsi_set_tag_type(sdev, 0); + scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun); return -1; } - - if (sdev->ordered_tags) - scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); - else - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); - return depth; + + return scsi_change_queue_depth(sdev, depth); } EXPORT_SYMBOL(scsi_track_queue_full); /** + * scsi_change_queue_type() - Change a device's queue type + * @sdev: The SCSI device whose queue depth is to change + * @tag_type: Identifier for queue type + */ +int scsi_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (!sdev->tagged_supported) + return 0; + + scsi_set_tag_type(sdev, tag_type); + return tag_type; + +} +EXPORT_SYMBOL(scsi_change_queue_type); + +/** * scsi_vpd_inquiry - Request a device provide us with a VPD page * @sdev: The device to ask * @buffer: Where to put the result diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 238e06f13b8a..aa4b6b80aade 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -63,8 +63,8 @@ #include "sd.h" #include "scsi_logging.h" -#define SCSI_DEBUG_VERSION "1.84" -static const char *scsi_debug_version_date = "20140706"; +#define SCSI_DEBUG_VERSION "1.85" +static const char *scsi_debug_version_date = "20141022"; #define MY_NAME "scsi_debug" @@ -75,19 +75,22 @@ static const char *scsi_debug_version_date = "20140706"; #define UNRECOVERED_READ_ERR 0x11 #define PARAMETER_LIST_LENGTH_ERR 0x1a #define INVALID_OPCODE 0x20 -#define ADDR_OUT_OF_RANGE 0x21 -#define INVALID_COMMAND_OPCODE 0x20 +#define LBA_OUT_OF_RANGE 0x21 #define INVALID_FIELD_IN_CDB 0x24 #define INVALID_FIELD_IN_PARAM_LIST 0x26 #define UA_RESET_ASC 0x29 #define UA_CHANGED_ASC 0x2a +#define INSUFF_RES_ASC 0x55 +#define INSUFF_RES_ASCQ 0x3 #define POWER_ON_RESET_ASCQ 0x0 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */ 
#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */ +#define CAPACITY_CHANGED_ASCQ 0x9 #define SAVING_PARAMS_UNSUP 0x39 #define TRANSPORT_PROBLEM 0x4b #define THRESHOLD_EXCEEDED 0x5d #define LOW_POWER_COND_ON 0x5e +#define MISCOMPARE_VERIFY_ASC 0x1d /* Additional Sense Code Qualifier (ASCQ) */ #define ACK_NAK_TO 0x3 @@ -133,6 +136,7 @@ static const char *scsi_debug_version_date = "20140706"; #define DEF_VIRTUAL_GB 0 #define DEF_VPD_USE_HOSTNO 1 #define DEF_WRITESAME_LENGTH 0xFFFF +#define DEF_STRICT 0 #define DELAY_OVERRIDDEN -9999 /* bit mask values for scsi_debug_opts */ @@ -176,11 +180,12 @@ static const char *scsi_debug_version_date = "20140706"; #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */ #define SDEBUG_UA_BUS_RESET 1 #define SDEBUG_UA_MODE_CHANGED 2 -#define SDEBUG_NUM_UAS 3 +#define SDEBUG_UA_CAPACITY_CHANGED 3 +#define SDEBUG_NUM_UAS 4 /* for check_readiness() */ -#define UAS_ONLY 1 -#define UAS_TUR 0 +#define UAS_ONLY 1 /* check for UAs only */ +#define UAS_TUR 0 /* if no UAs then check if media access possible */ /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this * sector on read commands: */ @@ -206,6 +211,301 @@ static const char *scsi_debug_version_date = "20140706"; #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE" #endif +/* SCSI opcodes (first byte of cdb) mapped onto these indexes */ +enum sdeb_opcode_index { + SDEB_I_INVALID_OPCODE = 0, + SDEB_I_INQUIRY = 1, + SDEB_I_REPORT_LUNS = 2, + SDEB_I_REQUEST_SENSE = 3, + SDEB_I_TEST_UNIT_READY = 4, + SDEB_I_MODE_SENSE = 5, /* 6, 10 */ + SDEB_I_MODE_SELECT = 6, /* 6, 10 */ + SDEB_I_LOG_SENSE = 7, + SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */ + SDEB_I_READ = 9, /* 6, 10, 12, 16 */ + SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */ + SDEB_I_START_STOP = 11, + SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */ + SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */ + SDEB_I_MAINT_IN = 14, + SDEB_I_MAINT_OUT = 15, + SDEB_I_VERIFY = 16, /* 10 only */ + SDEB_I_VARIABLE_LEN = 17, + SDEB_I_RESERVE = 18, /* 6, 10 */ + SDEB_I_RELEASE = 19, /* 6, 10 */ + SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */ + SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */ + SDEB_I_ATA_PT = 22, /* 12, 16 */ + SDEB_I_SEND_DIAG = 23, + SDEB_I_UNMAP = 24, + SDEB_I_XDWRITEREAD = 25, /* 10 only */ + SDEB_I_WRITE_BUFFER = 26, + SDEB_I_WRITE_SAME = 27, /* 10, 16 */ + SDEB_I_SYNC_CACHE = 28, /* 10 only */ + SDEB_I_COMP_WRITE = 29, + SDEB_I_LAST_ELEMENT = 30, /* keep this last */ +}; + +static const unsigned char opcode_ind_arr[256] = { +/* 0x0; 0x0->0x1f: 6 byte cdbs */ + SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE, + 0, 0, 0, 0, + SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0, + 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, + SDEB_I_RELEASE, + 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG, + SDEB_I_ALLOW_REMOVAL, 0, +/* 0x20; 0x20->0x3f: 10 byte cdbs */ + 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0, + SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY, + 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0, + 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0, +/* 0x40; 0x40->0x5f: 10 byte cdbs */ + 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0, + 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, + SDEB_I_RELEASE, + 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0, +/* 0x60; 0x60->0x7d are reserved */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, SDEB_I_VARIABLE_LEN, +/* 0x80; 
0x80->0x9f: 16 byte cdbs */ + 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0, + SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0, + 0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT, +/* 0xa0; 0xa0->0xbf: 12 byte cdbs */ + SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN, + SDEB_I_MAINT_OUT, 0, 0, 0, + SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN, + 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, +/* 0xc0; 0xc0->0xff: vendor specific */ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; + +#define F_D_IN 1 +#define F_D_OUT 2 +#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */ +#define F_D_UNKN 8 +#define F_RL_WLUN_OK 0x10 +#define F_SKIP_UA 0x20 +#define F_DELAY_OVERR 0x40 +#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */ +#define F_SA_HIGH 0x100 /* as used by variable length cdbs */ +#define F_INV_OP 0x200 +#define F_FAKE_RW 0x400 +#define F_M_ACCESS 0x800 /* media access */ + +#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR) +#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW) +#define FF_SA (F_SA_HIGH | F_SA_LOW) + +struct sdebug_dev_info; +static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); +static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); + +struct opcode_info_t { + u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff + * for terminating element */ + u8 opcode; /* if num_attached > 0, preferred */ + u16 sa; /* service action */ + u32 flags; /* OR-ed set of SDEB_F_* */ + int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); + const struct opcode_info_t *arrp; /* num_attached elements or NULL */ + u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... 
*/ + /* ignore cdb bytes after position 15 */ +}; + +static const struct opcode_info_t msense_iarr[1] = { + {0, 0x1a, 0, F_D_IN, NULL, NULL, + {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t mselect_iarr[1] = { + {0, 0x15, 0, F_D_OUT, NULL, NULL, + {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t read_iarr[3] = { + {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */ + {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, + {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */ + {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */ + {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, + 0xc7, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t write_iarr[3] = { + {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */ + {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, + {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */ + {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */ + {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f, + 0xc7, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t sa_in_iarr[1] = { + {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL, + {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0, 0xc7} }, +}; + +static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */ + {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0, + NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa, + 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */ +}; + +static const struct opcode_info_t maint_in_iarr[2] = { + {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL, + {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, + 0xc7, 0, 0, 0, 0} }, + {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL, + {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, + 0, 0} }, +}; + +static const struct opcode_info_t write_same_iarr[1] = { + {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL, + {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x1f, 0xc7} }, +}; + +static const struct opcode_info_t reserve_iarr[1] = { + {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */ + {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +static const struct opcode_info_t release_iarr[1] = { + {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */ + {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + + +/* This array is accessed via SDEB_I_* values. Make sure all are mapped, + * plus the terminating elements for logic that scans this table such as + * REPORT SUPPORTED OPERATION CODES. 
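As the comment above notes, opcode_info_arr (the table that follows) is reached by first translating cdb byte 0 through opcode_ind_arr into an SDEB_I_* slot; each entry then supplies the command's flags, cdb length/mask and response callback. A rough sketch of that lookup, assuming the surrounding scsi_debug context; the patch's real dispatcher also walks the num_attached entries, matches service actions and applies the F_* checks:

	static int example_dispatch(struct scsi_cmnd *scp,
				    struct sdebug_dev_info *devip)
	{
		u8 opcode = scp->cmnd[0];
		const struct opcode_info_t *oip =
			&opcode_info_arr[opcode_ind_arr[opcode]];

		if ((F_INV_OP & oip->flags) || !oip->pfp) {
			mk_sense_invalid_opcode(scp);	/* helper added later in this patch */
			return check_condition_result;
		}
		return oip->pfp(scp, devip);	/* e.g. resp_inquiry(), resp_read_dt0() */
	}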
*/ +static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { +/* 0 */ + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, + {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL, + {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, + 0, 0} }, + {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, + {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */ + {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr, + {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, + 0} }, + {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr, + {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, + {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, + {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, + 0, 0, 0} }, + {0, 0x25, 0, F_D_IN, resp_readcap, NULL, + {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0, + 0, 0} }, + {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr, + {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */ +/* 10 */ + {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr, + {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */ + {0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */ + {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr, + {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */ + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr, + {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0, + 0} }, + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0, + vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0, + 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */ + {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */ + {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, + 0} }, + {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */ + {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, + 0} }, +/* 20 */ + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? 
*/ + {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */ + {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */ + {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, + {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10, + NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, + 0, 0, 0, 0, 0, 0} }, + {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, + {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10, + write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, + 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, + {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */ + {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0, + 0, 0, 0, 0} }, + {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL, + {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, + 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */ + +/* 30 */ + {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, +}; + +struct sdebug_scmd_extra_t { + bool inj_recovered; + bool inj_transport; + bool inj_dif; + bool inj_dix; + bool inj_short; +}; + static int scsi_debug_add_host = DEF_NUM_HOST; static int scsi_debug_ato = DEF_ATO; static int scsi_debug_delay = DEF_DELAY; @@ -245,6 +545,8 @@ static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; static bool scsi_debug_removable = DEF_REMOVABLE; static bool scsi_debug_clustering; static bool scsi_debug_host_lock = DEF_HOST_LOCK; +static bool scsi_debug_strict = DEF_STRICT; +static bool sdebug_any_injecting_opt; static atomic_t sdebug_cmnd_count; static atomic_t sdebug_completions; @@ -277,11 +579,10 @@ struct sdebug_dev_info { unsigned int target; u64 lun; struct sdebug_host_info *sdbg_host; - u64 wlun; unsigned long uas_bm[1]; atomic_t num_in_q; - char stopped; - char used; + char stopped; /* TODO: should be atomic */ + bool used; }; struct sdebug_host_info { @@ -394,6 +695,50 @@ static void sdebug_max_tgts_luns(void) spin_unlock(&sdebug_host_list_lock); } +enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1}; + +/* Set in_bit to -1 to indicate no bit position of invalid field */ +static void +mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d, + int in_byte, int in_bit) +{ + unsigned char *sbuff; + u8 sks[4]; + int sl, asc; + + sbuff = scp->sense_buffer; + if (!sbuff) { + sdev_printk(KERN_ERR, scp->device, + "%s: sense_buffer is NULL\n", __func__); + return; + } + asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST; + memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE); + scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST, + asc, 0); + memset(sks, 0, sizeof(sks)); + sks[0] = 0x80; + if (c_d) + sks[0] |= 0x40; + if (in_bit >= 0) { + sks[0] |= 0x8; + sks[0] |= 0x7 & in_bit; + } + put_unaligned_be16(in_byte, sks + 1); + if (scsi_debug_dsense) { + sl = sbuff[7] + 8; + sbuff[7] = sl; + sbuff[sl] = 0x2; + sbuff[sl + 1] = 0x6; + memcpy(sbuff + sl + 4, sks, 3); + } else + memcpy(sbuff + 15, sks, 3); + if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) + sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq" + "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n", + my_name, asc, c_d ? 
'C' : 'D', in_byte, in_bit); +} + static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) { unsigned char *sbuff; @@ -414,63 +759,10 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq) my_name, key, asc, asq); } -static void get_data_transfer_info(unsigned char *cmd, - unsigned long long *lba, unsigned int *num, - u32 *ei_lba) +static void +mk_sense_invalid_opcode(struct scsi_cmnd *scp) { - *ei_lba = 0; - - switch (*cmd) { - case VARIABLE_LENGTH_CMD: - *lba = (u64)cmd[19] | (u64)cmd[18] << 8 | - (u64)cmd[17] << 16 | (u64)cmd[16] << 24 | - (u64)cmd[15] << 32 | (u64)cmd[14] << 40 | - (u64)cmd[13] << 48 | (u64)cmd[12] << 56; - - *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 | - (u32)cmd[21] << 16 | (u32)cmd[20] << 24; - - *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 | - (u32)cmd[28] << 24; - break; - - case WRITE_SAME_16: - case WRITE_16: - case READ_16: - *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | - (u64)cmd[7] << 16 | (u64)cmd[6] << 24 | - (u64)cmd[5] << 32 | (u64)cmd[4] << 40 | - (u64)cmd[3] << 48 | (u64)cmd[2] << 56; - - *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 | - (u32)cmd[10] << 24; - break; - case WRITE_12: - case READ_12: - *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 | - (u32)cmd[2] << 24; - - *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 | - (u32)cmd[6] << 24; - break; - case WRITE_SAME: - case WRITE_10: - case READ_10: - case XDWRITEREAD_10: - *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 | - (u32)cmd[2] << 24; - - *num = (u32)cmd[8] | (u32)cmd[7] << 8; - break; - case WRITE_6: - case READ_6: - *lba = (u32)cmd[3] | (u32)cmd[2] << 8 | - (u32)(cmd[1] & 0x1f) << 16; - *num = (0 == cmd[4]) ? 256 : cmd[4]; - break; - default: - break; - } + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); } static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) @@ -520,6 +812,11 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, if (debug) cp = "mode parameters changed"; break; + case SDEBUG_UA_CAPACITY_CHANGED: + mk_sense_buffer(SCpnt, UNIT_ATTENTION, + UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ); + if (debug) + cp = "capacity data changed"; default: pr_warn("%s: unexpected unit attention code=%d\n", __func__, k); @@ -924,19 +1221,20 @@ static int inquiry_evpd_b2(unsigned char *arr) #define SDEBUG_LONG_INQ_SZ 96 #define SDEBUG_MAX_INQ_ARR_SZ 584 -static int resp_inquiry(struct scsi_cmnd *scp, int target, - struct sdebug_dev_info * devip) +static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { unsigned char pq_pdt; unsigned char * arr; unsigned char *cmd = scp->cmnd; int alloc_len, n, ret; + bool have_wlun; alloc_len = (cmd[3] << 8) + cmd[4]; arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); if (! 
arr) return DID_REQUEUE << 16; - if (devip->wlun) + have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS); + if (have_wlun) pq_pdt = 0x1e; /* present, wlun */ else if (scsi_debug_no_lun_0 && (0 == devip->lun)) pq_pdt = 0x7f; /* not present, no device type */ @@ -944,8 +1242,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target, pq_pdt = (scsi_debug_ptype & 0x1f); arr[0] = pq_pdt; if (0x2 & cmd[1]) { /* CMDDT bit set */ - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, - 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1); kfree(arr); return check_condition_result; } else if (0x1 & cmd[1]) { /* EVPD bit set */ @@ -957,7 +1254,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target, (devip->channel & 0x7f); if (0 == scsi_debug_vpd_use_hostno) host_no = 0; - lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) + + lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) + (devip->target * 1000) + devip->lun); target_dev_id = ((host_no + 1) * 2000) + (devip->target * 1000) - 3; @@ -1029,9 +1326,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target, arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b2(&arr[4]); } else { - /* Illegal request, invalid field in cdb */ - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1); kfree(arr); return check_condition_result; } @@ -1077,18 +1372,20 @@ static int resp_requests(struct scsi_cmnd * scp, unsigned char * sbuff; unsigned char *cmd = scp->cmnd; unsigned char arr[SCSI_SENSE_BUFFERSIZE]; - int want_dsense; + bool dsense, want_dsense; int len = 18; memset(arr, 0, sizeof(arr)); - want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense; + dsense = !!(cmd[1] & 1); + want_dsense = dsense || scsi_debug_dsense; sbuff = scp->sense_buffer; if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { - if (want_dsense) { + if (dsense) { arr[0] = 0x72; arr[1] = 0x0; /* NO_SENSE in sense_key */ arr[2] = THRESHOLD_EXCEEDED; arr[3] = 0xff; /* TEST set and MRIE==6 */ + len = 8; } else { arr[0] = 0x70; arr[2] = 0x0; /* NO_SENSE in sense_key */ @@ -1098,15 +1395,34 @@ static int resp_requests(struct scsi_cmnd * scp, } } else { memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE); - if ((cmd[1] & 1) && (! 
scsi_debug_dsense)) { - /* DESC bit set and sense_buff in fixed format */ - memset(arr, 0, sizeof(arr)); + if (arr[0] >= 0x70 && dsense == scsi_debug_dsense) + ; /* have sense and formats match */ + else if (arr[0] <= 0x70) { + if (dsense) { + memset(arr, 0, 8); + arr[0] = 0x72; + len = 8; + } else { + memset(arr, 0, 18); + arr[0] = 0x70; + arr[7] = 0xa; + } + } else if (dsense) { + memset(arr, 0, 8); arr[0] = 0x72; arr[1] = sbuff[2]; /* sense key */ arr[2] = sbuff[12]; /* asc */ arr[3] = sbuff[13]; /* ascq */ len = 8; + } else { + memset(arr, 0, 18); + arr[0] = 0x70; + arr[2] = sbuff[1]; + arr[7] = 0xa; + arr[12] = sbuff[1]; + arr[13] = sbuff[3]; } + } mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0); return fill_from_dev_buffer(scp, arr, len); @@ -1116,15 +1432,11 @@ static int resp_start_stop(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char *cmd = scp->cmnd; - int power_cond, errsts, start; + int power_cond, start; - errsts = check_readiness(scp, UAS_ONLY, devip); - if (errsts) - return errsts; power_cond = (cmd[4] & 0xf0) >> 4; if (power_cond) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, - 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7); return check_condition_result; } start = cmd[4] & 1; @@ -1148,11 +1460,7 @@ static int resp_readcap(struct scsi_cmnd * scp, { unsigned char arr[SDEBUG_READCAP_ARR_SZ]; unsigned int capac; - int errsts; - errsts = check_readiness(scp, UAS_ONLY, devip); - if (errsts) - return errsts; /* following just in case virtual_gb changed */ sdebug_capacity = get_sdebug_capacity(); memset(arr, 0, SDEBUG_READCAP_ARR_SZ); @@ -1180,11 +1488,8 @@ static int resp_readcap16(struct scsi_cmnd * scp, unsigned char *cmd = scp->cmnd; unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; unsigned long long capac; - int errsts, k, alloc_len; + int k, alloc_len; - errsts = check_readiness(scp, UAS_ONLY, devip); - if (errsts) - return errsts; alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) + cmd[13]); /* following just in case virtual_gb changed */ @@ -1300,6 +1605,184 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp, return ret; } +static int +resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + bool rctd; + u8 reporting_opts, req_opcode, sdeb_i, supp; + u16 req_sa, u; + u32 alloc_len, a_len; + int k, offset, len, errsts, count, bump, na; + const struct opcode_info_t *oip; + const struct opcode_info_t *r_oip; + u8 *arr; + u8 *cmd = scp->cmnd; + + rctd = !!(cmd[2] & 0x80); + reporting_opts = cmd[2] & 0x7; + req_opcode = cmd[3]; + req_sa = get_unaligned_be16(cmd + 4); + alloc_len = get_unaligned_be32(cmd + 6); + if (alloc_len < 4 && alloc_len > 0xffff) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); + return check_condition_result; + } + if (alloc_len > 8192) + a_len = 8192; + else + a_len = alloc_len; + arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL); + if (NULL == arr) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + switch (reporting_opts) { + case 0: /* all commands */ + /* count number of commands */ + for (count = 0, oip = opcode_info_arr; + oip->num_attached != 0xff; ++oip) { + if (F_INV_OP & oip->flags) + continue; + count += (oip->num_attached + 1); + } + bump = rctd ? 
20 : 8; + put_unaligned_be32(count * bump, arr); + for (offset = 4, oip = opcode_info_arr; + oip->num_attached != 0xff && offset < a_len; ++oip) { + if (F_INV_OP & oip->flags) + continue; + na = oip->num_attached; + arr[offset] = oip->opcode; + put_unaligned_be16(oip->sa, arr + offset + 2); + if (rctd) + arr[offset + 5] |= 0x2; + if (FF_SA & oip->flags) + arr[offset + 5] |= 0x1; + put_unaligned_be16(oip->len_mask[0], arr + offset + 6); + if (rctd) + put_unaligned_be16(0xa, arr + offset + 8); + r_oip = oip; + for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) { + if (F_INV_OP & oip->flags) + continue; + offset += bump; + arr[offset] = oip->opcode; + put_unaligned_be16(oip->sa, arr + offset + 2); + if (rctd) + arr[offset + 5] |= 0x2; + if (FF_SA & oip->flags) + arr[offset + 5] |= 0x1; + put_unaligned_be16(oip->len_mask[0], + arr + offset + 6); + if (rctd) + put_unaligned_be16(0xa, + arr + offset + 8); + } + oip = r_oip; + offset += bump; + } + break; + case 1: /* one command: opcode only */ + case 2: /* one command: opcode plus service action */ + case 3: /* one command: if sa==0 then opcode only else opcode+sa */ + sdeb_i = opcode_ind_arr[req_opcode]; + oip = &opcode_info_arr[sdeb_i]; + if (F_INV_OP & oip->flags) { + supp = 1; + offset = 4; + } else { + if (1 == reporting_opts) { + if (FF_SA & oip->flags) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, + 2, 2); + kfree(arr); + return check_condition_result; + } + req_sa = 0; + } else if (2 == reporting_opts && + 0 == (FF_SA & oip->flags)) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); + kfree(arr); /* point at requested sa */ + return check_condition_result; + } + if (0 == (FF_SA & oip->flags) && + req_opcode == oip->opcode) + supp = 3; + else if (0 == (FF_SA & oip->flags)) { + na = oip->num_attached; + for (k = 0, oip = oip->arrp; k < na; + ++k, ++oip) { + if (req_opcode == oip->opcode) + break; + } + supp = (k >= na) ? 1 : 3; + } else if (req_sa != oip->sa) { + na = oip->num_attached; + for (k = 0, oip = oip->arrp; k < na; + ++k, ++oip) { + if (req_sa == oip->sa) + break; + } + supp = (k >= na) ? 1 : 3; + } else + supp = 3; + if (3 == supp) { + u = oip->len_mask[0]; + put_unaligned_be16(u, arr + 2); + arr[4] = oip->opcode; + for (k = 1; k < u; ++k) + arr[4 + k] = (k < 16) ? + oip->len_mask[k] : 0xff; + offset = 4 + u; + } else + offset = 4; + } + arr[1] = (rctd ? 0x80 : 0) | supp; + if (rctd) { + put_unaligned_be16(0xa, arr + offset); + offset += 12; + } + break; + default: + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2); + kfree(arr); + return check_condition_result; + } + offset = (offset < a_len) ? offset : a_len; + len = (offset < alloc_len) ? offset : alloc_len; + errsts = fill_from_dev_buffer(scp, arr, len); + kfree(arr); + return errsts; +} + +static int +resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + bool repd; + u32 alloc_len, len; + u8 arr[16]; + u8 *cmd = scp->cmnd; + + memset(arr, 0, sizeof(arr)); + repd = !!(cmd[2] & 0x80); + alloc_len = get_unaligned_be32(cmd + 6); + if (alloc_len < 4) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); + return check_condition_result; + } + arr[0] = 0xc8; /* ATS | ATSS | LURS */ + arr[1] = 0x1; /* ITNRS */ + if (repd) { + arr[3] = 0xc; + len = 16; + } else + len = 4; + + len = (len < alloc_len) ? 
len : alloc_len; + return fill_from_dev_buffer(scp, arr, len); +} + /* <<Following mode page info copied from ST318451LW>> */ static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target) @@ -1459,20 +1942,18 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol) #define SDEBUG_MAX_MSENSE_SZ 256 -static int resp_mode_sense(struct scsi_cmnd * scp, int target, - struct sdebug_dev_info * devip) +static int +resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { unsigned char dbd, llbaa; int pcontrol, pcode, subpcode, bd_len; unsigned char dev_spec; - int k, alloc_len, msense_6, offset, len, errsts, target_dev_id; + int k, alloc_len, msense_6, offset, len, target_dev_id; + int target = scp->device->id; unsigned char * ap; unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; unsigned char *cmd = scp->cmnd; - errsts = check_readiness(scp, UAS_ONLY, devip); - if (errsts) - return errsts; dbd = !!(cmd[1] & 0x8); pcontrol = (cmd[2] & 0xc0) >> 6; pcode = cmd[2] & 0x3f; @@ -1542,8 +2023,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { /* TODO: Control Extension page */ - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, - 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } switch (pcode) { @@ -1569,8 +2049,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, break; case 0x19: /* if spc==1 then sas phy, control+discover */ if ((subpcode > 0x2) && (subpcode < 0xff)) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } len = 0; @@ -1602,15 +2081,13 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, } len += resp_iec_m_pg(ap + len, pcontrol, target); } else { - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } offset += len; break; default: - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, - 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); return check_condition_result; } if (msense_6) @@ -1624,24 +2101,21 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target, #define SDEBUG_MAX_MSELECT_SZ 512 -static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, - struct sdebug_dev_info * devip) +static int +resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { int pf, sp, ps, md_len, bd_len, off, spf, pg_len; - int param_len, res, errsts, mpage; + int param_len, res, mpage; unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; unsigned char *cmd = scp->cmnd; + int mselect6 = (MODE_SELECT == cmd[0]); - errsts = check_readiness(scp, UAS_ONLY, devip); - if (errsts) - return errsts; memset(arr, 0, sizeof(arr)); pf = cmd[1] & 0x10; sp = cmd[1] & 0x1; param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1); return check_condition_result; } res = fetch_to_dev_buffer(scp, arr, param_len); @@ -1655,16 +2129,14 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); bd_len = mselect6 ? 
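/* The mode parameter header differs between the 6- and 10-byte CDBs:
 * MODE SELECT(6) keeps the block descriptor length in the single byte
 * at offset 3, MODE SELECT(10) in the big-endian 16-bit value at
 * offsets 6..7 (the mode data length above follows the same split): */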
arr[3] : ((arr[6] << 8) + arr[7]); if (md_len > 2) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_PARAM_LIST, 0); + mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1); return check_condition_result; } off = bd_len + (mselect6 ? 4 : 8); mpage = arr[off] & 0x3f; ps = !!(arr[off] & 0x80); if (ps) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_PARAM_LIST, 0); + mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7); return check_condition_result; } spf = !!(arr[off] & 0x40); @@ -1701,8 +2173,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, default: break; } - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_PARAM_LIST, 0); + mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5); return check_condition_result; set_mode_changed_ua: set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm); @@ -1737,19 +2208,15 @@ static int resp_ie_l_pg(unsigned char * arr) static int resp_log_sense(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { - int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n; + int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n; unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; unsigned char *cmd = scp->cmnd; - errsts = check_readiness(scp, UAS_ONLY, devip); - if (errsts) - return errsts; memset(arr, 0, sizeof(arr)); ppc = cmd[1] & 0x2; sp = cmd[1] & 0x1; if (ppc || sp) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0); return check_condition_result; } pcontrol = (cmd[2] & 0xc0) >> 6; @@ -1773,8 +2240,7 @@ static int resp_log_sense(struct scsi_cmnd * scp, arr[3] = resp_ie_l_pg(arr + 4); break; default: - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); return check_condition_result; } } else if (0xff == subpcode) { @@ -1806,13 +2272,11 @@ static int resp_log_sense(struct scsi_cmnd * scp, arr[3] = n - 4; break; default: - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5); return check_condition_result; } } else { - mk_sense_buffer(scp, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1); return check_condition_result; } len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); @@ -1824,11 +2288,12 @@ static int check_device_access_params(struct scsi_cmnd *scp, unsigned long long lba, unsigned int num) { if (lba + num > sdebug_capacity) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0); + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); return check_condition_result; } /* transfer length excessive (tie in to block limits VPD page) */ if (num > sdebug_store_sectors) { + /* needs work to find which cdb byte 'num' comes from */ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } @@ -1836,17 +2301,17 @@ static int check_device_access_params(struct scsi_cmnd *scp, } /* Returns number of bytes copied or -1 if error. 
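The copy runs between the command's scatter-gather list and the
fake_storep ram store; an access that runs past sdebug_store_sectors
wraps back to the start of the store, which is how a large virtual_gb
capacity can sit on top of a small dev_size_mb allocation.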
*/ -static int do_device_access(struct scsi_cmnd *scmd, - unsigned long long lba, unsigned int num, int write) +static int +do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) { int ret; - unsigned long long block, rest = 0; + u64 block, rest = 0; struct scsi_data_buffer *sdb; enum dma_data_direction dir; size_t (*func)(struct scatterlist *, unsigned int, void *, size_t, off_t); - if (write) { + if (do_write) { sdb = scsi_out(scmd); dir = DMA_TO_DEVICE; func = sg_pcopy_to_buffer; @@ -1880,6 +2345,38 @@ static int do_device_access(struct scsi_cmnd *scmd, return ret; } +/* If fake_store(lba,num) compares equal to arr(num), then copy top half of + * arr into fake_store(lba,num) and return true. If comparison fails then + * return false. */ +static bool +comp_write_worker(u64 lba, u32 num, const u8 *arr) +{ + bool res; + u64 block, rest = 0; + u32 store_blks = sdebug_store_sectors; + u32 lb_size = scsi_debug_sector_size; + + block = do_div(lba, store_blks); + if (block + num > store_blks) + rest = block + num - store_blks; + + res = !memcmp(fake_storep + (block * lb_size), arr, + (num - rest) * lb_size); + if (!res) + return res; + if (rest) + res = memcmp(fake_storep, arr + ((num - rest) * lb_size), + rest * lb_size); + if (!res) + return res; + arr += num * lb_size; + memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size); + if (rest) + memcpy(fake_storep, arr + ((num - rest) * lb_size), + rest * lb_size); + return res; +} + static __be16 dif_compute_csum(const void *buf, int len) { __be16 csum; @@ -1992,55 +2489,143 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, return 0; } -static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, - unsigned int num, u32 ei_lba) +static int +resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { + u8 *cmd = scp->cmnd; + u64 lba; + u32 num; + u32 ei_lba; unsigned long iflags; int ret; + bool check_prot; - ret = check_device_access_params(SCpnt, lba, num); - if (ret) - return ret; + switch (cmd[0]) { + case READ_16: + ei_lba = 0; + lba = get_unaligned_be64(cmd + 2); + num = get_unaligned_be32(cmd + 10); + check_prot = true; + break; + case READ_10: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = true; + break; + case READ_6: + ei_lba = 0; + lba = (u32)cmd[3] | (u32)cmd[2] << 8 | + (u32)(cmd[1] & 0x1f) << 16; + num = (0 == cmd[4]) ? 
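/* per SBC, a READ(6) TRANSFER LENGTH of zero means 256 blocks: */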
256 : cmd[4]; + check_prot = true; + break; + case READ_12: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be32(cmd + 6); + check_prot = true; + break; + case XDWRITEREAD_10: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = false; + break; + default: /* assume READ(32) */ + lba = get_unaligned_be64(cmd + 12); + ei_lba = get_unaligned_be32(cmd + 20); + num = get_unaligned_be32(cmd + 28); + check_prot = false; + break; + } + if (check_prot) { + if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + (cmd[1] & 0xe0)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || + scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && + (cmd[1] & 0xe0) == 0) + sdev_printk(KERN_ERR, scp->device, "Unprotected RD " + "to DIF device\n"); + } + if (sdebug_any_injecting_opt) { + struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); + + if (ep->inj_short) + num /= 2; + } + + /* inline check_device_access_params() */ + if (lba + num > sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + return check_condition_result; + } + /* transfer length excessive (tie in to block limits VPD page) */ + if (num > sdebug_store_sectors) { + /* needs work to find which cdb byte 'num' comes from */ + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + return check_condition_result; + } if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { /* claim unrecoverable read error */ - mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); + mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); /* set info field and valid bit for fixed descriptor */ - if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) { - SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */ + if (0x70 == (scp->sense_buffer[0] & 0x7f)) { + scp->sense_buffer[0] |= 0x80; /* Valid bit */ ret = (lba < OPT_MEDIUM_ERR_ADDR) ? 
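/* fixed-format sense only: the VALID bit was set above and the start of
 * the injected bad-block range (or the request's own first LBA, when it
 * begins inside that range) goes into the INFORMATION field at
 * bytes 3..6: */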
OPT_MEDIUM_ERR_ADDR : (int)lba; - SCpnt->sense_buffer[3] = (ret >> 24) & 0xff; - SCpnt->sense_buffer[4] = (ret >> 16) & 0xff; - SCpnt->sense_buffer[5] = (ret >> 8) & 0xff; - SCpnt->sense_buffer[6] = ret & 0xff; + put_unaligned_be32(ret, scp->sense_buffer + 3); } - scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); + scsi_set_resid(scp, scsi_bufflen(scp)); return check_condition_result; } read_lock_irqsave(&atomic_rw, iflags); /* DIX + T10 DIF */ - if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { - int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba); + if (scsi_debug_dix && scsi_prot_sg_count(scp)) { + int prot_ret = prot_verify_read(scp, lba, num, ei_lba); if (prot_ret) { read_unlock_irqrestore(&atomic_rw, iflags); - mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret); return illegal_condition_result; } } - ret = do_device_access(SCpnt, lba, num, 0); + ret = do_device_access(scp, lba, num, false); read_unlock_irqrestore(&atomic_rw, iflags); if (ret == -1) return DID_ERROR << 16; - scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret; + scsi_in(scp)->resid = scsi_bufflen(scp) - ret; + + if (sdebug_any_injecting_opt) { + struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); + if (ep->inj_recovered) { + mk_sense_buffer(scp, RECOVERED_ERROR, + THRESHOLD_EXCEEDED, 0); + return check_condition_result; + } else if (ep->inj_transport) { + mk_sense_buffer(scp, ABORTED_COMMAND, + TRANSPORT_PROBLEM, ACK_NAK_TO); + return check_condition_result; + } else if (ep->inj_dif) { + /* Logical block guard check failed */ + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + return illegal_condition_result; + } else if (ep->inj_dix) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + return illegal_condition_result; + } + } return 0; } @@ -2223,31 +2808,95 @@ static void unmap_region(sector_t lba, unsigned int len) } } -static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, - unsigned int num, u32 ei_lba) +static int +resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { + u8 *cmd = scp->cmnd; + u64 lba; + u32 num; + u32 ei_lba; unsigned long iflags; int ret; + bool check_prot; - ret = check_device_access_params(SCpnt, lba, num); - if (ret) - return ret; + switch (cmd[0]) { + case WRITE_16: + ei_lba = 0; + lba = get_unaligned_be64(cmd + 2); + num = get_unaligned_be32(cmd + 10); + check_prot = true; + break; + case WRITE_10: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = true; + break; + case WRITE_6: + ei_lba = 0; + lba = (u32)cmd[3] | (u32)cmd[2] << 8 | + (u32)(cmd[1] & 0x1f) << 16; + num = (0 == cmd[4]) ? 
256 : cmd[4]; + check_prot = true; + break; + case WRITE_12: + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be32(cmd + 6); + check_prot = true; + break; + case 0x53: /* XDWRITEREAD(10) */ + ei_lba = 0; + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + check_prot = false; + break; + default: /* assume WRITE(32) */ + lba = get_unaligned_be64(cmd + 12); + ei_lba = get_unaligned_be32(cmd + 20); + num = get_unaligned_be32(cmd + 28); + check_prot = false; + break; + } + if (check_prot) { + if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + (cmd[1] & 0xe0)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || + scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && + (cmd[1] & 0xe0) == 0) + sdev_printk(KERN_ERR, scp->device, "Unprotected WR " + "to DIF device\n"); + } + + /* inline check_device_access_params() */ + if (lba + num > sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + return check_condition_result; + } + /* transfer length excessive (tie in to block limits VPD page) */ + if (num > sdebug_store_sectors) { + /* needs work to find which cdb byte 'num' comes from */ + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + return check_condition_result; + } write_lock_irqsave(&atomic_rw, iflags); /* DIX + T10 DIF */ - if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { - int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba); + if (scsi_debug_dix && scsi_prot_sg_count(scp)) { + int prot_ret = prot_verify_write(scp, lba, num, ei_lba); if (prot_ret) { write_unlock_irqrestore(&atomic_rw, iflags); - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, - prot_ret); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret); return illegal_condition_result; } } - ret = do_device_access(SCpnt, lba, num, 1); + ret = do_device_access(scp, lba, num, true); if (scsi_debug_lbp()) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); @@ -2255,30 +2904,41 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, return (DID_ERROR << 16); else if ((ret < (num * scsi_debug_sector_size)) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) - sdev_printk(KERN_INFO, SCpnt->device, + sdev_printk(KERN_INFO, scp->device, "%s: write: cdb indicated=%u, IO sent=%d bytes\n", my_name, num * scsi_debug_sector_size, ret); + if (sdebug_any_injecting_opt) { + struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); + + if (ep->inj_recovered) { + mk_sense_buffer(scp, RECOVERED_ERROR, + THRESHOLD_EXCEEDED, 0); + return check_condition_result; + } else if (ep->inj_dif) { + /* Logical block guard check failed */ + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + return illegal_condition_result; + } else if (ep->inj_dix) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + return illegal_condition_result; + } + } return 0; } -static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, - unsigned int num, u32 ei_lba, unsigned int unmap) +static int +resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, + bool unmap, bool ndob) { unsigned long iflags; unsigned long long i; int ret; - ret = check_device_access_params(scmd, lba, num); + ret = check_device_access_params(scp, lba, num); if (ret) return ret; - if (num > scsi_debug_write_same_length) { - mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, - 0); - return check_condition_result; - } - write_lock_irqsave(&atomic_rw, iflags); if (unmap && scsi_debug_lbp()) { @@ 
-2286,17 +2946,22 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, goto out; } - /* Else fetch one logical block */ - ret = fetch_to_dev_buffer(scmd, - fake_storep + (lba * scsi_debug_sector_size), - scsi_debug_sector_size); + /* if ndob then zero 1 logical block, else fetch 1 logical block */ + if (ndob) { + memset(fake_storep + (lba * scsi_debug_sector_size), 0, + scsi_debug_sector_size); + ret = 0; + } else + ret = fetch_to_dev_buffer(scp, fake_storep + + (lba * scsi_debug_sector_size), + scsi_debug_sector_size); if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return (DID_ERROR << 16); } else if ((ret < (num * scsi_debug_sector_size)) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) - sdev_printk(KERN_INFO, scmd->device, + sdev_printk(KERN_INFO, scp->device, "%s: %s: cdb indicated=%u, IO sent=%d bytes\n", my_name, "write same", num * scsi_debug_sector_size, ret); @@ -2315,13 +2980,143 @@ out: return 0; } +static int +resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u32 lba; + u16 num; + u32 ei_lba = 0; + bool unmap = false; + + if (cmd[1] & 0x8) { + if (scsi_debug_lbpws10 == 0) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); + return check_condition_result; + } else + unmap = true; + } + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + if (num > scsi_debug_write_same_length) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); + return check_condition_result; + } + return resp_write_same(scp, lba, num, ei_lba, unmap, false); +} + +static int +resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u64 lba; + u32 num; + u32 ei_lba = 0; + bool unmap = false; + bool ndob = false; + + if (cmd[1] & 0x8) { /* UNMAP */ + if (scsi_debug_lbpws == 0) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3); + return check_condition_result; + } else + unmap = true; + } + if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */ + ndob = true; + lba = get_unaligned_be64(cmd + 2); + num = get_unaligned_be32(cmd + 10); + if (num > scsi_debug_write_same_length) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1); + return check_condition_result; + } + return resp_write_same(scp, lba, num, ei_lba, unmap, ndob); +} + +static int +resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u8 *arr; + u8 *fake_storep_hold; + u64 lba; + u32 dnum; + u32 lb_size = scsi_debug_sector_size; + u8 num; + unsigned long iflags; + int ret; + + lba = get_unaligned_be32(cmd + 2); + num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ + if (0 == num) + return 0; /* degenerate case, not an error */ + dnum = 2 * num; + arr = kzalloc(dnum * lb_size, GFP_ATOMIC); + if (NULL == arr) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + (cmd[1] & 0xe0)) { + mk_sense_invalid_opcode(scp); + return check_condition_result; + } + if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || + scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && + (cmd[1] & 0xe0) == 0) + sdev_printk(KERN_ERR, scp->device, "Unprotected WR " + "to DIF device\n"); + + /* inline check_device_access_params() */ + if (lba + num > sdebug_capacity) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); + return check_condition_result; + } + /* transfer length excessive (tie in to block limits VPD page) */ + if (num > sdebug_store_sectors) { + /* needs 
work to find which cdb byte 'num' comes from */ + mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); + return check_condition_result; + } + + write_lock_irqsave(&atomic_rw, iflags); + + /* trick do_device_access() to fetch both compare and write buffers + * from data-in into arr. Safe (atomic) since write_lock held. */ + fake_storep_hold = fake_storep; + fake_storep = arr; + ret = do_device_access(scp, 0, dnum, true); + fake_storep = fake_storep_hold; + if (ret == -1) { + write_unlock_irqrestore(&atomic_rw, iflags); + kfree(arr); + return DID_ERROR << 16; + } else if ((ret < (dnum * lb_size)) && + (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) + sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " + "indicated=%u, IO sent=%d bytes\n", my_name, + dnum * lb_size, ret); + if (!comp_write_worker(lba, num, arr)) { + write_unlock_irqrestore(&atomic_rw, iflags); + kfree(arr); + mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0); + return check_condition_result; + } + if (scsi_debug_lbp()) + map_region(lba, num); + write_unlock_irqrestore(&atomic_rw, iflags); + return 0; +} + struct unmap_block_desc { __be64 lba; __be32 blocks; __be32 __reserved; }; -static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) +static int +resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { unsigned char *buf; struct unmap_block_desc *desc; @@ -2329,20 +3124,26 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) int ret; unsigned long iflags; - ret = check_readiness(scmd, UAS_ONLY, devip); - if (ret) - return ret; - payload_len = get_unaligned_be16(&scmd->cmnd[7]); - BUG_ON(scsi_bufflen(scmd) != payload_len); + if (!scsi_debug_lbp()) + return 0; /* fib and say its done */ + payload_len = get_unaligned_be16(scp->cmnd + 7); + BUG_ON(scsi_bufflen(scp) != payload_len); descriptors = (payload_len - 8) / 16; + if (descriptors > scsi_debug_unmap_max_desc) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1); + return check_condition_result; + } - buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC); - if (!buf) + buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); + if (!buf) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); return check_condition_result; + } - scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); + scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp)); BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16); @@ -2355,7 +3156,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) unsigned long long lba = get_unaligned_be64(&desc[i].lba); unsigned int num = get_unaligned_be32(&desc[i].blocks); - ret = check_device_access_params(scmd, lba, num); + ret = check_device_access_params(scp, lba, num); if (ret) goto out; @@ -2373,37 +3174,44 @@ out: #define SDEBUG_GET_LBA_STATUS_LEN 32 -static int resp_get_lba_status(struct scsi_cmnd * scmd, - struct sdebug_dev_info * devip) +static int +resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) { - unsigned long long lba; - unsigned int alloc_len, mapped, num; - unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN]; + u8 *cmd = scp->cmnd; + u64 lba; + u32 alloc_len, mapped, num; + u8 arr[SDEBUG_GET_LBA_STATUS_LEN]; int ret; - ret = check_readiness(scmd, UAS_ONLY, devip); - if (ret) - return ret; - - lba = get_unaligned_be64(&scmd->cmnd[2]); - alloc_len = get_unaligned_be32(&scmd->cmnd[10]); + lba = get_unaligned_be64(cmd + 2); + alloc_len = get_unaligned_be32(cmd + 10); if 
(alloc_len < 24) return 0; - ret = check_device_access_params(scmd, lba, 1); + ret = check_device_access_params(scp, lba, 1); if (ret) return ret; - mapped = map_state(lba, &num); + if (scsi_debug_lbp()) + mapped = map_state(lba, &num); + else { + mapped = 1; + /* following just in case virtual_gb changed */ + sdebug_capacity = get_sdebug_capacity(); + if (sdebug_capacity - lba <= 0xffffffff) + num = sdebug_capacity - lba; + else + num = 0xffffffff; + } memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN); - put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */ - put_unaligned_be64(lba, &arr[8]); /* LBA */ - put_unaligned_be32(num, &arr[16]); /* Number of blocks */ - arr[20] = !mapped; /* mapped = 0, unmapped = 1 */ + put_unaligned_be32(20, arr); /* Parameter Data Length */ + put_unaligned_be64(lba, arr + 8); /* LBA */ + put_unaligned_be32(num, arr + 16); /* Number of blocks */ + arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */ - return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN); + return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN); } #define SDEBUG_RLUN_ARR_SZ 256 @@ -2412,8 +3220,8 @@ static int resp_report_luns(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned int alloc_len; - int lun_cnt, i, upper, num, n; - u64 wlun, lun; + int lun_cnt, i, upper, num, n, want_wlun, shortish; + u64 lun; unsigned char *cmd = scp->cmnd; int select_report = (int)cmd[2]; struct scsi_lun *one_lun; @@ -2421,9 +3229,9 @@ static int resp_report_luns(struct scsi_cmnd * scp, unsigned char * max_addr; alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); - if ((alloc_len < 4) || (select_report > 2)) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, - 0); + shortish = (alloc_len < 4); + if (shortish || (select_report > 2)) { + mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1); return check_condition_result; } /* can produce response with up to 16k luns (lun 0 to lun 16383) */ @@ -2433,14 +3241,14 @@ static int resp_report_luns(struct scsi_cmnd * scp, lun_cnt = 0; else if (scsi_debug_no_lun_0 && (lun_cnt > 0)) --lun_cnt; - wlun = (select_report > 0) ? 1 : 0; - num = lun_cnt + wlun; + want_wlun = (select_report > 0) ? 1 : 0; + num = lun_cnt + want_wlun; arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff; arr[3] = (sizeof(struct scsi_lun) * num) & 0xff; n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / sizeof(struct scsi_lun)), num); if (n < num) { - wlun = 0; + want_wlun = 0; lun_cnt = n; } one_lun = (struct scsi_lun *) &arr[8]; @@ -2454,7 +3262,7 @@ static int resp_report_luns(struct scsi_cmnd * scp, (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); one_lun[i].scsi_lun[1] = lun & 0xff; } - if (wlun) { + if (want_wlun) { one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff; one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff; i++; @@ -2476,8 +3284,8 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, /* better not to use temporary buffer. 
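XDWRITEREAD(10) is bidirectional -- roughly speaking, the data-out
payload is written while an XOR product of the old and new data is
returned on the data-in side -- so a scratch copy of the transferred
data is taken here before the XOR pass over the data-in buffer.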
*/ buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); if (!buf) { - mk_sense_buffer(scp, NOT_READY, - LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); return check_condition_result; } @@ -2500,6 +3308,32 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, return 0; } +static int +resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) +{ + u8 *cmd = scp->cmnd; + u64 lba; + u32 num; + int errsts; + + if (!scsi_bidi_cmnd(scp)) { + mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, + INSUFF_RES_ASCQ); + return check_condition_result; + } + errsts = resp_read_dt0(scp, devip); + if (errsts) + return errsts; + if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */ + errsts = resp_write_dt0(scp, devip); + if (errsts) + return errsts; + } + lba = get_unaligned_be32(cmd + 2); + num = get_unaligned_be16(cmd + 7); + return resp_xdwriteread(scp, lba, num, devip); +} + /* When timer or tasklet goes off this function is called. */ static void sdebug_q_cmd_complete(unsigned long indx) { @@ -2672,10 +3506,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) open_devip->sdbg_host = sdbg_host; atomic_set(&open_devip->num_in_q, 0); set_bit(SDEBUG_UA_POR, open_devip->uas_bm); - open_devip->used = 1; - if (sdev->lun == SAM2_WLUN_REPORT_LUNS) - open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff; - + open_devip->used = true; return open_devip; } @@ -2701,10 +3532,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp) if (NULL == devip) return 1; /* no resources, will be marked offline */ sdp->hostdata = devip; - sdp->tagged_supported = 1; - if (sdp->host->cmd_per_lun) - scsi_adjust_queue_depth(sdp, DEF_TAGGED_QUEUING, - DEF_CMD_PER_LUN); blk_queue_max_segment_size(sdp->request_queue, -1U); if (scsi_debug_no_uld) sdp->no_uld_attach = 1; @@ -2721,7 +3548,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp) sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); if (devip) { /* make this slot available for re-use */ - devip->used = 0; + devip->used = false; sdp->hostdata = NULL; } } @@ -3166,6 +3993,7 @@ module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR); module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); +module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR); module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); @@ -3185,7 +4013,7 @@ MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny"); -MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); +MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)"); MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); @@ -3212,11 +4040,12 @@ MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(removable, "claim to 
have removable media (def=0)"); MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])"); MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); +MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)"); MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); -MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); +MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); @@ -3382,6 +4211,16 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf, return -EINVAL; opts_done: scsi_debug_opts = opts; + if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_DIF_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_DIX_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) + sdebug_any_injecting_opt = true; atomic_set(&sdebug_cmnd_count, 0); atomic_set(&sdebug_a_tsf, 0); return count; @@ -3589,12 +4428,25 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, size_t count) { int n; + bool changed; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + changed = (scsi_debug_virtual_gb != n); scsi_debug_virtual_gb = n; - sdebug_capacity = get_sdebug_capacity(); - + if (changed) { + struct sdebug_host_info *sdhp; + struct sdebug_dev_info *dp; + + list_for_each_entry(sdhp, &sdebug_host_list, + host_list) { + list_for_each_entry(dp, &sdhp->dev_info_list, + dev_list) { + set_bit(SDEBUG_UA_CAPACITY_CHANGED, + dp->uas_bm); + } + } + } return count; } return -EINVAL; @@ -3740,6 +4592,23 @@ static ssize_t host_lock_store(struct device_driver *ddp, const char *buf, } static DRIVER_ATTR_RW(host_lock); +static ssize_t strict_show(struct device_driver *ddp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict); +} +static ssize_t strict_store(struct device_driver *ddp, const char *buf, + size_t count) +{ + int n; + + if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { + scsi_debug_strict = (n > 0); + return count; + } + return -EINVAL; +} +static DRIVER_ATTR_RW(strict); + /* Note: The following array creates attribute files in the /sys/bus/pseudo/drivers/scsi_debug directory. 
The advantage of these @@ -3775,6 +4644,7 @@ static struct attribute *sdebug_drv_attrs[] = { &driver_attr_removable.attr, &driver_attr_host_lock.attr, &driver_attr_ndelay.attr, + &driver_attr_strict.attr, NULL, }; ATTRIBUTE_GROUPS(sdebug_drv); @@ -4087,396 +4957,9 @@ static void sdebug_remove_adapter(void) } static int -scsi_debug_queuecommand(struct scsi_cmnd *SCpnt) -{ - unsigned char *cmd = SCpnt->cmnd; - int len, k; - unsigned int num; - unsigned long long lba; - u32 ei_lba; - int errsts = 0; - int target = SCpnt->device->id; - struct sdebug_dev_info *devip = NULL; - int inj_recovered = 0; - int inj_transport = 0; - int inj_dif = 0; - int inj_dix = 0; - int inj_short = 0; - int delay_override = 0; - int unmap = 0; - - scsi_set_resid(SCpnt, 0); - if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && - !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) { - char b[120]; - int n; - - len = SCpnt->cmd_len; - if (len > 32) - strcpy(b, "too long, over 32 bytes"); - else { - for (k = 0, n = 0; k < len; ++k) - n += scnprintf(b + n, sizeof(b) - n, "%02x ", - (unsigned int)cmd[k]); - } - sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name, - b); - } - - if ((SCpnt->device->lun >= scsi_debug_max_luns) && - (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS)) - return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0); - devip = devInfoReg(SCpnt->device); - if (NULL == devip) - return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0); - - if ((scsi_debug_every_nth != 0) && - (atomic_inc_return(&sdebug_cmnd_count) >= - abs(scsi_debug_every_nth))) { - atomic_set(&sdebug_cmnd_count, 0); - if (scsi_debug_every_nth < -1) - scsi_debug_every_nth = -1; - if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) - return 0; /* ignore command causing timeout */ - else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts && - scsi_medium_access_command(SCpnt)) - return 0; /* time out reads and writes */ - else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts) - inj_recovered = 1; /* to reads and writes below */ - else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts) - inj_transport = 1; /* to reads and writes below */ - else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts) - inj_dif = 1; /* to reads and writes below */ - else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts) - inj_dix = 1; /* to reads and writes below */ - else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts) - inj_short = 1; - } - - if (devip->wlun) { - switch (*cmd) { - case INQUIRY: - case REQUEST_SENSE: - case TEST_UNIT_READY: - case REPORT_LUNS: - break; /* only allowable wlun commands */ - default: - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) - printk(KERN_INFO "scsi_debug: Opcode: 0x%x " - "not supported for wlun\n", *cmd); - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_OPCODE, 0); - errsts = check_condition_result; - return schedule_resp(SCpnt, devip, errsts, 0); - } - } - - switch (*cmd) { - case INQUIRY: /* mandatory, ignore unit attention */ - delay_override = 1; - errsts = resp_inquiry(SCpnt, target, devip); - break; - case REQUEST_SENSE: /* mandatory, ignore unit attention */ - delay_override = 1; - errsts = resp_requests(SCpnt, devip); - break; - case REZERO_UNIT: /* actually this is REWIND for SSC */ - case START_STOP: - errsts = resp_start_stop(SCpnt, devip); - break; - case ALLOW_MEDIUM_REMOVAL: - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - if (errsts) - break; - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) - printk(KERN_INFO "scsi_debug: Medium removal %s\n", - cmd[4] ? 
"inhibited" : "enabled"); - break; - case SEND_DIAGNOSTIC: /* mandatory */ - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - break; - case TEST_UNIT_READY: /* mandatory */ - /* delay_override = 1; */ - errsts = check_readiness(SCpnt, UAS_TUR, devip); - break; - case RESERVE: - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - break; - case RESERVE_10: - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - break; - case RELEASE: - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - break; - case RELEASE_10: - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - break; - case READ_CAPACITY: - errsts = resp_readcap(SCpnt, devip); - break; - case SERVICE_ACTION_IN: - if (cmd[1] == SAI_READ_CAPACITY_16) - errsts = resp_readcap16(SCpnt, devip); - else if (cmd[1] == SAI_GET_LBA_STATUS) { - - if (scsi_debug_lbp() == 0) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_COMMAND_OPCODE, 0); - errsts = check_condition_result; - } else - errsts = resp_get_lba_status(SCpnt, devip); - } else { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_OPCODE, 0); - errsts = check_condition_result; - } - break; - case MAINTENANCE_IN: - if (MI_REPORT_TARGET_PGS != cmd[1]) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_OPCODE, 0); - errsts = check_condition_result; - break; - } - errsts = resp_report_tgtpgs(SCpnt, devip); - break; - case READ_16: - case READ_12: - case READ_10: - /* READ{10,12,16} and DIF Type 2 are natural enemies */ - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && - cmd[1] & 0xe0) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_COMMAND_OPCODE, 0); - errsts = check_condition_result; - break; - } - - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && - (cmd[1] & 0xe0) == 0) - printk(KERN_ERR "Unprotected RD/WR to DIF device\n"); - - /* fall through */ - case READ_6: -read: - errsts = check_readiness(SCpnt, UAS_TUR, devip); - if (errsts) - break; - if (scsi_debug_fake_rw) - break; - get_data_transfer_info(cmd, &lba, &num, &ei_lba); - - if (inj_short) - num /= 2; - - errsts = resp_read(SCpnt, lba, num, ei_lba); - if (inj_recovered && (0 == errsts)) { - mk_sense_buffer(SCpnt, RECOVERED_ERROR, - THRESHOLD_EXCEEDED, 0); - errsts = check_condition_result; - } else if (inj_transport && (0 == errsts)) { - mk_sense_buffer(SCpnt, ABORTED_COMMAND, - TRANSPORT_PROBLEM, ACK_NAK_TO); - errsts = check_condition_result; - } else if (inj_dif && (0 == errsts)) { - /* Logical block guard check failed */ - mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1); - errsts = illegal_condition_result; - } else if (inj_dix && (0 == errsts)) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1); - errsts = illegal_condition_result; - } - break; - case REPORT_LUNS: /* mandatory, ignore unit attention */ - delay_override = 1; - errsts = resp_report_luns(SCpnt, devip); - break; - case VERIFY: /* 10 byte SBC-2 command */ - errsts = check_readiness(SCpnt, UAS_TUR, devip); - break; - case WRITE_16: - case WRITE_12: - case WRITE_10: - /* WRITE{10,12,16} and DIF Type 2 are natural enemies */ - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && - cmd[1] & 0xe0) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_COMMAND_OPCODE, 0); - errsts = check_condition_result; - break; - } - - if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || - scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && - (cmd[1] & 0xe0) == 0) - printk(KERN_ERR "Unprotected RD/WR to DIF device\n"); - - /* fall through */ - case WRITE_6: -write: - errsts = check_readiness(SCpnt, UAS_TUR, devip); - if (errsts) - 
break; - if (scsi_debug_fake_rw) - break; - get_data_transfer_info(cmd, &lba, &num, &ei_lba); - errsts = resp_write(SCpnt, lba, num, ei_lba); - if (inj_recovered && (0 == errsts)) { - mk_sense_buffer(SCpnt, RECOVERED_ERROR, - THRESHOLD_EXCEEDED, 0); - errsts = check_condition_result; - } else if (inj_dif && (0 == errsts)) { - mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1); - errsts = illegal_condition_result; - } else if (inj_dix && (0 == errsts)) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1); - errsts = illegal_condition_result; - } - break; - case WRITE_SAME_16: - case WRITE_SAME: - if (cmd[1] & 0x8) { - if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) || - (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); - errsts = check_condition_result; - } else - unmap = 1; - } - if (errsts) - break; - errsts = check_readiness(SCpnt, UAS_TUR, devip); - if (errsts) - break; - if (scsi_debug_fake_rw) - break; - get_data_transfer_info(cmd, &lba, &num, &ei_lba); - errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap); - break; - case UNMAP: - errsts = check_readiness(SCpnt, UAS_TUR, devip); - if (errsts) - break; - if (scsi_debug_fake_rw) - break; - - if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_COMMAND_OPCODE, 0); - errsts = check_condition_result; - } else - errsts = resp_unmap(SCpnt, devip); - break; - case MODE_SENSE: - case MODE_SENSE_10: - errsts = resp_mode_sense(SCpnt, target, devip); - break; - case MODE_SELECT: - errsts = resp_mode_select(SCpnt, 1, devip); - break; - case MODE_SELECT_10: - errsts = resp_mode_select(SCpnt, 0, devip); - break; - case LOG_SENSE: - errsts = resp_log_sense(SCpnt, devip); - break; - case SYNCHRONIZE_CACHE: - delay_override = 1; - errsts = check_readiness(SCpnt, UAS_TUR, devip); - break; - case WRITE_BUFFER: - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - break; - case XDWRITEREAD_10: - if (!scsi_bidi_cmnd(SCpnt)) { - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); - errsts = check_condition_result; - break; - } - - errsts = check_readiness(SCpnt, UAS_TUR, devip); - if (errsts) - break; - if (scsi_debug_fake_rw) - break; - get_data_transfer_info(cmd, &lba, &num, &ei_lba); - errsts = resp_read(SCpnt, lba, num, ei_lba); - if (errsts) - break; - errsts = resp_write(SCpnt, lba, num, ei_lba); - if (errsts) - break; - errsts = resp_xdwriteread(SCpnt, lba, num, devip); - break; - case VARIABLE_LENGTH_CMD: - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) { - - if ((cmd[10] & 0xe0) == 0) - printk(KERN_ERR - "Unprotected RD/WR to DIF device\n"); - - if (cmd[9] == READ_32) { - BUG_ON(SCpnt->cmd_len < 32); - goto read; - } - - if (cmd[9] == WRITE_32) { - BUG_ON(SCpnt->cmd_len < 32); - goto write; - } - } - - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_FIELD_IN_CDB, 0); - errsts = check_condition_result; - break; - case 0x85: - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) - sdev_printk(KERN_INFO, SCpnt->device, - "%s: ATA PASS-THROUGH(16) not supported\n", my_name); - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, - INVALID_OPCODE, 0); - errsts = check_condition_result; - break; - default: - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) - sdev_printk(KERN_INFO, SCpnt->device, - "%s: Opcode: 0x%x not supported\n", - my_name, *cmd); - errsts = check_readiness(SCpnt, UAS_ONLY, devip); - if (errsts) - break; /* Unit attention takes precedence */ - mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0); - 
errsts = check_condition_result; - break; - } - return schedule_resp(SCpnt, devip, errsts, - (delay_override ? 0 : scsi_debug_delay)); -} - -static int -sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd) -{ - if (scsi_debug_host_lock) { - unsigned long iflags; - int rc; - - spin_lock_irqsave(shost->host_lock, iflags); - rc = scsi_debug_queuecommand(cmd); - spin_unlock_irqrestore(shost->host_lock, iflags); - return rc; - } else - return scsi_debug_queuecommand(cmd); -} - -static int -sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason) +sdebug_change_qdepth(struct scsi_device *sdev, int qdepth) { int num_in_q = 0; - int bad = 0; unsigned long iflags; struct sdebug_dev_info *devip; @@ -4488,43 +4971,18 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason) } num_in_q = atomic_read(&devip->num_in_q); spin_unlock_irqrestore(&queued_arr_lock, iflags); - if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) { - if (qdepth < 1) - qdepth = 1; - /* allow to exceed max host queued_arr elements for testing */ - if (qdepth > SCSI_DEBUG_CANQUEUE + 10) - qdepth = SCSI_DEBUG_CANQUEUE + 10; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - } else if (reason == SCSI_QDEPTH_QFULL) - scsi_track_queue_full(sdev, qdepth); - else - bad = 1; - if (bad) - sdev_printk(KERN_WARNING, sdev, - "%s: unknown reason=0x%x\n", __func__, reason); + + if (qdepth < 1) + qdepth = 1; + /* allow to exceed max host queued_arr elements for testing */ + if (qdepth > SCSI_DEBUG_CANQUEUE + 10) + qdepth = SCSI_DEBUG_CANQUEUE + 10; + scsi_change_queue_depth(sdev, qdepth); + if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { - if (SCSI_QDEPTH_QFULL == reason) - sdev_printk(KERN_INFO, sdev, - "%s: -> %d, num_in_q=%d, reason: queue full\n", - __func__, qdepth, num_in_q); - else { - const char *cp; - - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - cp = "default (sysfs ?)"; - break; - case SCSI_QDEPTH_RAMP_UP: - cp = "ramp up"; - break; - default: - cp = "unknown"; - break; - } - sdev_printk(KERN_INFO, sdev, - "%s: qdepth=%d, num_in_q=%d, reason: %s\n", - __func__, qdepth, num_in_q, cp); - } + sdev_printk(KERN_INFO, sdev, + "%s: qdepth=%d, num_in_q=%d\n", + __func__, qdepth, num_in_q); } return sdev->queue_depth; } @@ -4532,14 +4990,7 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason) static int sdebug_change_qtype(struct scsi_device *sdev, int qtype) { - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, qtype); - if (qtype) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - qtype = 0; + qtype = scsi_change_queue_type(sdev, qtype); if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) { const char *cp; @@ -4562,6 +5013,193 @@ sdebug_change_qtype(struct scsi_device *sdev, int qtype) return qtype; } +static int +check_inject(struct scsi_cmnd *scp) +{ + struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); + + memset(ep, 0, sizeof(struct sdebug_scmd_extra_t)); + + if (atomic_inc_return(&sdebug_cmnd_count) >= + abs(scsi_debug_every_nth)) { + atomic_set(&sdebug_cmnd_count, 0); + if (scsi_debug_every_nth < -1) + scsi_debug_every_nth = -1; + if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) + return 1; /* ignore command causing timeout */ + else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts && + scsi_medium_access_command(scp)) + return 1; /* time out reads and writes */ + if (sdebug_any_injecting_opt) { + int opts = scsi_debug_opts; + + if (SCSI_DEBUG_OPT_RECOVERED_ERR & 
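/* the selected fault is only recorded here, in the per-command private
 * area (scsi_cmd_priv / sdebug_scmd_extra_t); resp_read_dt0() and
 * resp_write_dt0() act on these flags after the transfer completes: */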
opts) + ep->inj_recovered = true; + else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) + ep->inj_transport = true; + else if (SCSI_DEBUG_OPT_DIF_ERR & opts) + ep->inj_dif = true; + else if (SCSI_DEBUG_OPT_DIX_ERR & opts) + ep->inj_dix = true; + else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) + ep->inj_short = true; + } + } + return 0; +} + +static int +scsi_debug_queuecommand(struct scsi_cmnd *scp) +{ + u8 sdeb_i; + struct scsi_device *sdp = scp->device; + const struct opcode_info_t *oip; + const struct opcode_info_t *r_oip; + struct sdebug_dev_info *devip; + u8 *cmd = scp->cmnd; + int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); + int k, na; + int errsts = 0; + int errsts_no_connect = DID_NO_CONNECT << 16; + u32 flags; + u16 sa; + u8 opcode = cmd[0]; + bool has_wlun_rl; + bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts); + + scsi_set_resid(scp, 0); + if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) { + char b[120]; + int n, len, sb; + + len = scp->cmd_len; + sb = (int)sizeof(b); + if (len > 32) + strcpy(b, "too long, over 32 bytes"); + else { + for (k = 0, n = 0; k < len && n < sb; ++k) + n += scnprintf(b + n, sb - n, "%02x ", + (u32)cmd[k]); + } + sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b); + } + has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS); + if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl) + return schedule_resp(scp, NULL, errsts_no_connect, 0); + + sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */ + oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */ + devip = (struct sdebug_dev_info *)sdp->hostdata; + if (!devip) { + devip = devInfoReg(sdp); + if (NULL == devip) + return schedule_resp(scp, NULL, errsts_no_connect, 0); + } + na = oip->num_attached; + r_pfp = oip->pfp; + if (na) { /* multiple commands with this opcode */ + r_oip = oip; + if (FF_SA & r_oip->flags) { + if (F_SA_LOW & oip->flags) + sa = 0x1f & cmd[1]; + else + sa = get_unaligned_be16(cmd + 8); + for (k = 0; k <= na; oip = r_oip->arrp + k++) { + if (opcode == oip->opcode && sa == oip->sa) + break; + } + } else { /* since no service action only check opcode */ + for (k = 0; k <= na; oip = r_oip->arrp + k++) { + if (opcode == oip->opcode) + break; + } + } + if (k > na) { + if (F_SA_LOW & r_oip->flags) + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4); + else if (F_SA_HIGH & r_oip->flags) + mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7); + else + mk_sense_invalid_opcode(scp); + goto check_cond; + } + } /* else (when na==0) we assume the oip is a match */ + flags = oip->flags; + if (F_INV_OP & flags) { + mk_sense_invalid_opcode(scp); + goto check_cond; + } + if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) { + if (debug) + sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: " + "0x%x not supported for wlun\n", opcode); + mk_sense_invalid_opcode(scp); + goto check_cond; + } + if (scsi_debug_strict) { /* check cdb against mask */ + u8 rem; + int j; + + for (k = 1; k < oip->len_mask[0] && k < 16; ++k) { + rem = ~oip->len_mask[k] & cmd[k]; + if (rem) { + for (j = 7; j >= 0; --j, rem <<= 1) { + if (0x80 & rem) + break; + } + mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j); + goto check_cond; + } + } + } + if (!(F_SKIP_UA & flags) && + SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) { + errsts = check_readiness(scp, UAS_ONLY, devip); + if (errsts) + goto check_cond; + } + if ((F_M_ACCESS & flags) && devip->stopped) { + mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2); + if (debug) + sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: " + "%s\n", my_name, 
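/* media-access commands against a unit stopped by START STOP UNIT get
 * NOT READY with asc/ascq 0x4/0x2, "logical unit not ready,
 * initializing command required": */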
"initializing command " + "required"); + errsts = check_condition_result; + goto fini; + } + if (scsi_debug_fake_rw && (F_FAKE_RW & flags)) + goto fini; + if (scsi_debug_every_nth) { + if (check_inject(scp)) + return 0; /* ignore command: make trouble */ + } + if (oip->pfp) /* if this command has a resp_* function, call it */ + errsts = oip->pfp(scp, devip); + else if (r_pfp) /* if leaf function ptr NULL, try the root's */ + errsts = r_pfp(scp, devip); + +fini: + return schedule_resp(scp, devip, errsts, + ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay)); +check_cond: + return schedule_resp(scp, devip, check_condition_result, 0); +} + +static int +sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd) +{ + if (scsi_debug_host_lock) { + unsigned long iflags; + int rc; + + spin_lock_irqsave(shost->host_lock, iflags); + rc = scsi_debug_queuecommand(cmd); + spin_unlock_irqrestore(shost->host_lock, iflags); + return rc; + } else + return scsi_debug_queuecommand(cmd); +} + static struct scsi_host_template sdebug_driver_template = { .show_info = scsi_debug_show_info, .write_info = scsi_debug_write_info, @@ -4587,13 +5225,16 @@ static struct scsi_host_template sdebug_driver_template = { .max_sectors = -1U, .use_clustering = DISABLE_CLUSTERING, .module = THIS_MODULE, + .track_queue_depth = 1, + .cmd_size = sizeof(struct sdebug_scmd_extra_t), }; static int sdebug_driver_probe(struct device * dev) { - int error = 0; - struct sdebug_host_info *sdbg_host; - struct Scsi_Host *hpnt; + int error = 0; + int opts; + struct sdebug_host_info *sdbg_host; + struct Scsi_Host *hpnt; int host_prot; sdbg_host = to_sdebug_host(dev); @@ -4603,7 +5244,7 @@ static int sdebug_driver_probe(struct device * dev) sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); if (NULL == hpnt) { - printk(KERN_ERR "%s: scsi_register failed\n", __func__); + pr_err("%s: scsi_host_alloc failed\n", __func__); error = -ENODEV; return error; } @@ -4660,6 +5301,18 @@ static int sdebug_driver_probe(struct device * dev) else scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); + opts = scsi_debug_opts; + if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_DIF_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_DIX_ERR & opts) + sdebug_any_injecting_opt = true; + else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts) + sdebug_any_injecting_opt = true; + error = scsi_add_host(hpnt, &sdbg_host->dev); if (error) { printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index bc5ff6ff9c79..e42fff6e8c10 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -36,6 +36,7 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> +#include <scsi/sg.h> #include "scsi_priv.h" #include "scsi_logging.h" @@ -157,8 +158,9 @@ scmd_eh_abort_handler(struct work_struct *work) } else { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, - "scmd %p abort failed, rtn %d\n", - scmd, rtn)); + "scmd %p abort %s\n", scmd, + (rtn == FAST_IO_FAIL) ? 
+ "not send" : "failed")); } } @@ -355,7 +357,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, if (cmd_cancel || cmd_failed) { SCSI_LOG_ERROR_RECOVERY(3, - sdev_printk(KERN_INFO, sdev, + shost_printk(KERN_INFO, shost, "%s: cmds failed: %d, cancel: %d\n", __func__, cmd_failed, cmd_cancel)); @@ -608,7 +610,7 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev) struct scsi_host_template *sht = sdev->host->hostt; struct scsi_device *tmp_sdev; - if (!sht->change_queue_depth || + if (!sht->track_queue_depth || sdev->queue_depth >= sdev->max_queue_depth) return; @@ -629,12 +631,8 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev) tmp_sdev->id != sdev->id || tmp_sdev->queue_depth == sdev->max_queue_depth) continue; - /* - * call back into LLD to increase queue_depth by one - * with ramp up reason code. - */ - sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1, - SCSI_QDEPTH_RAMP_UP); + + scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1); sdev->last_queue_ramp_up = jiffies; } } @@ -644,7 +642,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev) struct scsi_host_template *sht = sdev->host->hostt; struct scsi_device *tmp_sdev; - if (!sht->change_queue_depth) + if (!sht->track_queue_depth) return; shost_for_each_device(tmp_sdev, sdev->host) { @@ -656,8 +654,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev) * the device when we got the queue full so we start * from the highest possible value and work our way down. */ - sht->change_queue_depth(tmp_sdev, tmp_sdev->queue_depth - 1, - SCSI_QDEPTH_QFULL); + scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1); } } @@ -869,7 +866,24 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) return rtn; } -static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd) +/** + * scsi_try_to_abort_cmd - Ask host to abort a SCSI command + * @scmd: SCSI cmd used to send a target reset + * + * Return value: + * SUCCESS, FAILED, or FAST_IO_FAIL + * + * Notes: + * SUCCESS does not necessarily indicate that the command + * has been aborted; it only indicates that the LLDDs + * has cleared all references to that command. + * LLDDs should return FAILED only if an abort was required + * but could not be executed. 
LLDDs should return FAST_IO_FAIL + * if the device is temporarily unavailable (eg due to a + * link down on FibreChannel) + */ +static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, + struct scsi_cmnd *scmd) { if (!hostt->eh_abort_handler) return FAILED; @@ -1156,9 +1170,9 @@ int scsi_eh_get_sense(struct list_head *work_q, shost = scmd->device->host; if (scsi_host_eh_past_deadline(shost)) { SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "skip %s, past eh deadline\n", - __func__)); + scmd_printk(KERN_INFO, scmd, + "%s: skip request sense, past eh deadline\n", + current->comm)); break; } if (status_byte(scmd->result) != CHECK_CONDITION) @@ -1180,7 +1194,7 @@ int scsi_eh_get_sense(struct list_head *work_q, SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "sense requested for %p result %x\n", scmd, scmd->result)); - SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd)); + SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd)); rtn = scsi_decide_disposition(scmd); @@ -1265,9 +1279,9 @@ static int scsi_eh_test_devices(struct list_head *cmd_list, /* Push items back onto work_q */ list_splice_init(cmd_list, work_q); SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, sdev->host, - "skip %s, past eh deadline", - __func__)); + sdev_printk(KERN_INFO, sdev, + "%s: skip test device, past eh deadline", + current->comm)); break; } } @@ -1318,21 +1332,20 @@ static int scsi_eh_abort_cmds(struct list_head *work_q, if (scsi_host_eh_past_deadline(shost)) { list_splice_init(&check_list, work_q); SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "skip %s, past eh deadline\n", - __func__)); + scmd_printk(KERN_INFO, scmd, + "%s: skip aborting cmd, past eh deadline\n", + current->comm)); return list_empty(work_q); } SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "%s: aborting cmd: 0x%p\n", - current->comm, scmd)); + scmd_printk(KERN_INFO, scmd, + "%s: aborting cmd\n", current->comm)); rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); if (rtn == FAILED) { SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "%s: aborting cmd failed: 0x%p\n", - current->comm, scmd)); + scmd_printk(KERN_INFO, scmd, + "%s: aborting cmd failed\n", + current->comm)); list_splice_init(&check_list, work_q); return list_empty(work_q); } @@ -1390,9 +1403,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost, shost_for_each_device(sdev, shost) { if (scsi_host_eh_past_deadline(shost)) { SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "skip %s, past eh deadline\n", - __func__)); + sdev_printk(KERN_INFO, sdev, + "%s: skip START_UNIT, past eh deadline\n", + current->comm)); break; } stu_scmd = NULL; @@ -1407,9 +1420,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost, continue; SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "%s: Sending START_UNIT to sdev: 0x%p\n", - current->comm, sdev)); + sdev_printk(KERN_INFO, sdev, + "%s: Sending START_UNIT\n", + current->comm)); if (!scsi_eh_try_stu(stu_scmd)) { if (!scsi_device_online(sdev) || @@ -1423,9 +1436,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost, } } else { SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "%s: START_UNIT failed to sdev:" - " 0x%p\n", current->comm, sdev)); + sdev_printk(KERN_INFO, sdev, + "%s: START_UNIT failed\n", + current->comm)); } } @@ -1456,9 +1469,9 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, shost_for_each_device(sdev, shost) { if (scsi_host_eh_past_deadline(shost)) { SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "skip 
%s, past eh deadline\n", - __func__)); + sdev_printk(KERN_INFO, sdev, + "%s: skip BDR, past eh deadline\n", + current->comm)); break; } bdr_scmd = NULL; @@ -1472,9 +1485,8 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, continue; SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "%s: Sending BDR sdev: 0x%p\n", - current->comm, sdev)); + sdev_printk(KERN_INFO, sdev, + "%s: Sending BDR\n", current->comm)); rtn = scsi_try_bus_device_reset(bdr_scmd); if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { if (!scsi_device_online(sdev) || @@ -1490,9 +1502,8 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, } } else { SCSI_LOG_ERROR_RECOVERY(3, - shost_printk(KERN_INFO, shost, - "%s: BDR failed sdev: 0x%p\n", - current->comm, sdev)); + sdev_printk(KERN_INFO, sdev, + "%s: BDR failed\n", current->comm)); } } @@ -1528,8 +1539,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, list_splice_init(&tmp_list, work_q); SCSI_LOG_ERROR_RECOVERY(3, shost_printk(KERN_INFO, shost, - "skip %s, past eh deadline\n", - __func__)); + "%s: Skip target reset, past eh deadline\n", + current->comm)); return list_empty(work_q); } @@ -1591,8 +1602,8 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, list_splice_init(&check_list, work_q); SCSI_LOG_ERROR_RECOVERY(3, shost_printk(KERN_INFO, shost, - "skip %s, past eh deadline\n", - __func__)); + "%s: skip BRST, past eh deadline\n", + current->comm)); return list_empty(work_q); } @@ -2193,9 +2204,9 @@ int scsi_error_handler(void *data) */ if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) { SCSI_LOG_ERROR_RECOVERY(1, - printk(KERN_ERR "Error handler scsi_eh_%d " - "unable to autoresume\n", - shost->host_no)); + shost_printk(KERN_ERR, shost, + "scsi_eh_%d: unable to autoresume\n", + shost->host_no)); continue; } @@ -2296,42 +2307,34 @@ scsi_reset_provider_done_command(struct scsi_cmnd *scmd) { } -/* - * Function: scsi_reset_provider - * - * Purpose: Send requested reset to a bus or device at any phase. - * - * Arguments: device - device to send reset to - * flag - reset type (see scsi.h) - * - * Returns: SUCCESS/FAILURE. - * - * Notes: This is used by the SCSI Generic driver to provide - * Bus/Device reset capability. 
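The scsi_try_to_abort_cmd() kerneldoc added above pins down the return-code contract for LLDD abort handlers. A minimal sketch of an eh_abort_handler honouring that contract; struct mydrv_host, mydrv_link_down() and mydrv_hw_abort() are hypothetical driver internals:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* mydrv_* names below are illustrative, not a real driver */
static int mydrv_eh_abort_handler(struct scsi_cmnd *scmd)
{
        struct mydrv_host *mh = shost_priv(scmd->device->host);

        /* transport temporarily gone (e.g. link down): fail fast */
        if (mydrv_link_down(mh))
                return FAST_IO_FAIL;

        /* an abort was required but could not be executed */
        if (mydrv_hw_abort(mh, scmd) != 0)
                return FAILED;

        /* all internal references to scmd have been dropped */
        return SUCCESS;
}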
+/** + * scsi_ioctl_reset: explicitly reset a host/bus/target/device + * @dev: scsi_device to operate on + * @arg: reset type (see sg.h) */ int -scsi_reset_provider(struct scsi_device *dev, int flag) +scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) { struct scsi_cmnd *scmd; struct Scsi_Host *shost = dev->host; struct request req; unsigned long flags; - int rtn; + int error = 0, rtn, val; - if (scsi_autopm_get_host(shost) < 0) - return FAILED; + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; - if (!get_device(&dev->sdev_gendev)) { - rtn = FAILED; - goto out_put_autopm_host; - } + error = get_user(val, arg); + if (error) + return error; + if (scsi_autopm_get_host(shost) < 0) + return -EIO; + + error = -EIO; scmd = scsi_get_command(dev, GFP_KERNEL); - if (!scmd) { - rtn = FAILED; - put_device(&dev->sdev_gendev); + if (!scmd) goto out_put_autopm_host; - } blk_rq_init(NULL, &req); scmd->request = &req; @@ -2349,29 +2352,37 @@ scsi_reset_provider(struct scsi_device *dev, int flag) shost->tmf_in_progress = 1; spin_unlock_irqrestore(shost->host_lock, flags); - switch (flag) { - case SCSI_TRY_RESET_DEVICE: + switch (val & ~SG_SCSI_RESET_NO_ESCALATE) { + case SG_SCSI_RESET_NOTHING: + rtn = SUCCESS; + break; + case SG_SCSI_RESET_DEVICE: rtn = scsi_try_bus_device_reset(scmd); - if (rtn == SUCCESS) + if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; /* FALLTHROUGH */ - case SCSI_TRY_RESET_TARGET: + case SG_SCSI_RESET_TARGET: rtn = scsi_try_target_reset(scmd); - if (rtn == SUCCESS) + if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; /* FALLTHROUGH */ - case SCSI_TRY_RESET_BUS: + case SG_SCSI_RESET_BUS: rtn = scsi_try_bus_reset(scmd); - if (rtn == SUCCESS) + if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; /* FALLTHROUGH */ - case SCSI_TRY_RESET_HOST: + case SG_SCSI_RESET_HOST: rtn = scsi_try_host_reset(scmd); - break; + if (rtn == SUCCESS) + break; default: + /* FALLTHROUGH */ rtn = FAILED; + break; } + error = (rtn == SUCCESS) ? 0 : -EIO; + spin_lock_irqsave(shost->host_lock, flags); shost->tmf_in_progress = 0; spin_unlock_irqrestore(shost->host_lock, flags); @@ -2385,15 +2396,15 @@ scsi_reset_provider(struct scsi_device *dev, int flag) "waking up host to restart after TMF\n")); wake_up(&shost->host_wait); - scsi_run_host_queues(shost); - scsi_next_command(scmd); + scsi_put_command(scmd); + out_put_autopm_host: scsi_autopm_put_host(shost); - return rtn; + return error; } -EXPORT_SYMBOL(scsi_reset_provider); +EXPORT_SYMBOL(scsi_ioctl_reset); /** * scsi_normalize_sense - normalize main elements from either fixed or @@ -2412,20 +2423,20 @@ EXPORT_SYMBOL(scsi_reset_provider); * responded to a SCSI command with the CHECK_CONDITION status. 
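For completeness, the reworked reset path above is driven from userspace through the SG_SCSI_RESET ioctl on any node that ends up in scsi_ioctl(); a rough sketch, assuming a uapi <scsi/sg.h> recent enough to define SG_SCSI_RESET_NO_ESCALATE and using /dev/sg0 purely as an example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
        /* non-escalating device reset; needs CAP_SYS_ADMIN and CAP_SYS_RAWIO */
        int val = SG_SCSI_RESET_DEVICE | SG_SCSI_RESET_NO_ESCALATE;
        int fd = open("/dev/sg0", O_RDWR | O_NONBLOCK);

        if (fd < 0 || ioctl(fd, SG_SCSI_RESET, &val) < 0) {
                perror("SG_SCSI_RESET");
                return 1;
        }
        close(fd);
        return 0;
}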
* * Return value: - * 1 if valid sense data information found, else 0; + * true if valid sense data information found, else false; */ -int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, - struct scsi_sense_hdr *sshdr) +bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, + struct scsi_sense_hdr *sshdr) { if (!sense_buffer || !sb_len) - return 0; + return false; memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); sshdr->response_code = (sense_buffer[0] & 0x7f); if (!scsi_sense_valid(sshdr)) - return 0; + return false; if (sshdr->response_code >= 0x72) { /* @@ -2455,12 +2466,12 @@ int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, } } - return 1; + return true; } EXPORT_SYMBOL(scsi_normalize_sense); -int scsi_command_normalize_sense(struct scsi_cmnd *cmd, - struct scsi_sense_hdr *sshdr) +bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, + struct scsi_sense_hdr *sshdr) { return scsi_normalize_sense(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, sshdr); diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 1aaaf43c6803..c4f7b56fa6f6 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -126,7 +126,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, sdev_printk(KERN_INFO, sdev, "ioctl_internal_command return code = %x\n", result); - scsi_print_sense_hdr(" ", &sshdr); + scsi_print_sense_hdr(sdev, NULL, &sshdr); break; } } @@ -200,19 +200,6 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { char scsi_cmd[MAX_COMMAND_SIZE]; - /* No idea how this happens.... */ - if (!sdev) - return -ENXIO; - - /* - * If we are in the middle of error recovery, don't let anyone - * else try and use this device. Also, if error recovery fails, it - * may try and take the device offline, in which case all further - * access to the device is prohibited. - */ - if (!scsi_block_when_processing_errors(sdev)) - return -ENODEV; - /* Check for deprecated ioctls ... all the ioctls which don't * follow the new unique numbering scheme are deprecated */ switch (cmd) { @@ -273,6 +260,8 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) START_STOP_TIMEOUT, NORMAL_RETRIES); case SCSI_IOCTL_GET_PCI: return scsi_ioctl_get_pci(sdev, arg); + case SG_SCSI_RESET: + return scsi_ioctl_reset(sdev, arg); default: if (sdev->host->hostt->ioctl) return sdev->host->hostt->ioctl(sdev, cmd, arg); @@ -281,55 +270,20 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) } EXPORT_SYMBOL(scsi_ioctl); -/** - * scsi_nonblockable_ioctl() - Handle SG_SCSI_RESET - * @sdev: scsi device receiving ioctl - * @cmd: Must be SC_SCSI_RESET - * @arg: pointer to int containing SG_SCSI_RESET_{DEVICE,BUS,HOST} - * @ndelay: file mode O_NDELAY flag +/* + * We can process a reset even when a device isn't fully operable. 
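With scsi_normalize_sense() and scsi_command_normalize_sense() returning bool above, call sites read as plain predicates; a small sketch of a consumer, where mydrv_log_sense() is a made-up name:

#include <linux/kernel.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static void mydrv_log_sense(struct scsi_cmnd *cmd)
{
        struct scsi_sense_hdr sshdr;

        /* false means no valid sense data, so nothing to report */
        if (scsi_command_normalize_sense(cmd, &sshdr))
                scmd_printk(KERN_INFO, cmd, "sense %02x/%02x/%02x\n",
                            sshdr.sense_key, sshdr.asc, sshdr.ascq);
}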
*/ -int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, - void __user *arg, int ndelay) +int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, int cmd, + bool ndelay) { - int val, result; - - /* The first set of iocts may be executed even if we're doing - * error processing, as long as the device was opened - * non-blocking */ - if (ndelay) { + if (cmd == SG_SCSI_RESET && ndelay) { if (scsi_host_in_recovery(sdev->host)) + return -EAGAIN; + } else { + if (!scsi_block_when_processing_errors(sdev)) return -ENODEV; - } else if (!scsi_block_when_processing_errors(sdev)) - return -ENODEV; - - switch (cmd) { - case SG_SCSI_RESET: - result = get_user(val, (int __user *)arg); - if (result) - return result; - if (val == SG_SCSI_RESET_NOTHING) - return 0; - switch (val) { - case SG_SCSI_RESET_DEVICE: - val = SCSI_TRY_RESET_DEVICE; - break; - case SG_SCSI_RESET_TARGET: - val = SCSI_TRY_RESET_TARGET; - break; - case SG_SCSI_RESET_BUS: - val = SCSI_TRY_RESET_BUS; - break; - case SG_SCSI_RESET_HOST: - val = SCSI_TRY_RESET_HOST; - break; - default: - return -EINVAL; - } - if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) - return -EACCES; - return (scsi_reset_provider(sdev, val) == - SUCCESS) ? 0 : -EIO; } - return -ENODEV; + + return 0; } -EXPORT_SYMBOL(scsi_nonblockable_ioctl); +EXPORT_SYMBOL_GPL(scsi_ioctl_block_when_processing_errors); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 50a6e1ac8d9c..7e3d954c9cac 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -22,6 +22,7 @@ #include <linux/hardirq.h> #include <linux/scatterlist.h> #include <linux/blk-mq.h> +#include <linux/ratelimit.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -47,7 +48,7 @@ struct scsi_host_sg_pool { mempool_t *pool; }; -#define SP(x) { x, "sgpool-" __stringify(x) } +#define SP(x) { .size = x, "sgpool-" __stringify(x) } #if (SCSI_MAX_SG_SEGMENTS < 32) #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) #endif @@ -542,17 +543,6 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) put_device(&sdev->sdev_gendev); } -void scsi_next_command(struct scsi_cmnd *cmd) -{ - struct scsi_device *sdev = cmd->device; - struct request_queue *q = sdev->request_queue; - - scsi_put_command(cmd); - scsi_run_queue(q); - - put_device(&sdev->sdev_gendev); -} - void scsi_run_host_queues(struct Scsi_Host *shost) { struct scsi_device *sdev; @@ -598,10 +588,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); } -static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, - gfp_t gfp_mask, bool mq) +static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) { struct scatterlist *first_chunk = NULL; + gfp_t gfp_mask = mq ? 
GFP_NOIO : GFP_ATOMIC; int ret; BUG_ON(!nents); @@ -730,8 +720,6 @@ static bool scsi_end_request(struct request *req, int error, kblockd_schedule_work(&sdev->requeue_work); else blk_mq_start_stopped_hw_queues(q, true); - - put_device(&sdev->sdev_gendev); } else { unsigned long flags; @@ -743,9 +731,12 @@ static bool scsi_end_request(struct request *req, int error, spin_unlock_irqrestore(q->queue_lock, flags); scsi_release_buffers(cmd); - scsi_next_command(cmd); + + scsi_put_command(cmd); + scsi_run_queue(q); } + put_device(&sdev->sdev_gendev); return false; } @@ -831,8 +822,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) struct request *req = cmd->request; int error = 0; struct scsi_sense_hdr sshdr; - int sense_valid = 0; - int sense_deferred = 0; + bool sense_valid = false; + int sense_deferred = 0, level = 0; enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, ACTION_DELAYED_RETRY} action; unsigned long wait_for = (cmd->allowed + 1) * req->timeout; @@ -912,7 +903,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) ; else if (!(req->cmd_flags & REQ_QUIET)) - scsi_print_sense("", cmd); + scsi_print_sense(cmd); result = 0; /* BLOCK_PC may have set error */ error = 0; @@ -1039,10 +1030,24 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) case ACTION_FAIL: /* Give up and fail the remainder of the request */ if (!(req->cmd_flags & REQ_QUIET)) { - scsi_print_result(cmd); - if (driver_byte(result) & DRIVER_SENSE) - scsi_print_sense("", cmd); - scsi_print_command(cmd); + static DEFINE_RATELIMIT_STATE(_rs, + DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + + if (unlikely(scsi_logging_level)) + level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, + SCSI_LOG_MLCOMPLETE_BITS); + + /* + * if logging is enabled the failure will be printed + * in scsi_log_completion(), so avoid duplicate messages + */ + if (!level && __ratelimit(&_rs)) { + scsi_print_result(cmd, NULL, FAILED); + if (driver_byte(result) & DRIVER_SENSE) + scsi_print_sense(cmd); + scsi_print_command(cmd); + } } if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0)) return; @@ -1072,8 +1077,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } } -static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, - gfp_t gfp_mask) +static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) { int count; @@ -1081,7 +1085,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, * If sg table allocation fails, requeue request later. 
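The ACTION_FAIL hunk above throttles the failure dump with the stock ratelimit helpers instead of printing unconditionally; the idiom in isolation, with a made-up mydrv_complain() as the surrounding function:

#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

static void mydrv_complain(struct scsi_cmnd *cmd)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        /* at most DEFAULT_RATELIMIT_BURST messages per interval */
        if (__ratelimit(&rs))
                scmd_printk(KERN_ERR, cmd, "command failed\n");
}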
*/ if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, - gfp_mask, req->mq_ctx != NULL))) + req->mq_ctx != NULL))) return BLKPREP_DEFER; /* @@ -1106,7 +1110,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, * BLKPREP_DEFER if the failure is retryable * BLKPREP_KILL if the failure is fatal */ -int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) +int scsi_init_io(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct request *rq = cmd->request; @@ -1115,7 +1119,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) BUG_ON(!rq->nr_phys_segments); - error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); + error = scsi_init_sgtable(rq, &cmd->sdb); if (error) goto err_exit; @@ -1131,8 +1135,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) rq->next_rq->special = bidi_sdb; } - error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special, - GFP_ATOMIC); + error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special); if (error) goto err_exit; } @@ -1144,7 +1147,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) BUG_ON(prot_sdb == NULL); ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); - if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask, is_mq)) { + if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { error = BLKPREP_DEFER; goto err_exit; } @@ -1213,7 +1216,7 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) * submit a request without an attached bio. */ if (req->bio) { - int ret = scsi_init_io(cmd, GFP_ATOMIC); + int ret = scsi_init_io(cmd); if (unlikely(ret)) return ret; } else { @@ -1638,6 +1641,87 @@ static void scsi_softirq_done(struct request *rq) } /** + * scsi_dispatch_command - Dispatch a command to the low-level driver. + * @cmd: command block we are dispatching. + * + * Return: nonzero return request was rejected and device's queue needs to be + * plugged. + */ +static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + int rtn = 0; + + atomic_inc(&cmd->device->iorequest_cnt); + + /* check if the device is still usable */ + if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { + /* in SDEV_DEL we error all commands. DID_NO_CONNECT + * returns an immediate error upwards, and signals + * that the device is no longer present */ + cmd->result = DID_NO_CONNECT << 16; + goto done; + } + + /* Check to see if the scsi lld made this device blocked. */ + if (unlikely(scsi_device_blocked(cmd->device))) { + /* + * in blocked state, the command is just put back on + * the device queue. The suspend state has already + * blocked the queue so future requests should not + * occur until the device transitions out of the + * suspend state. + */ + SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, + "queuecommand : device blocked\n")); + return SCSI_MLQUEUE_DEVICE_BUSY; + } + + /* Store the LUN value in cmnd, if needed. */ + if (cmd->device->lun_in_cdb) + cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | + (cmd->device->lun << 5 & 0xe0); + + scsi_log_send(cmd); + + /* + * Before we queue this command, check if the command + * length exceeds what the host adapter can handle. + */ + if (cmd->cmd_len > cmd->device->host->max_cmd_len) { + SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, + "queuecommand : command too long. 
" + "cdb_size=%d host->max_cmd_len=%d\n", + cmd->cmd_len, cmd->device->host->max_cmd_len)); + cmd->result = (DID_ABORT << 16); + goto done; + } + + if (unlikely(host->shost_state == SHOST_DEL)) { + cmd->result = (DID_NO_CONNECT << 16); + goto done; + + } + + trace_scsi_dispatch_cmd_start(cmd); + rtn = host->hostt->queuecommand(host, cmd); + if (rtn) { + trace_scsi_dispatch_cmd_error(cmd, rtn); + if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && + rtn != SCSI_MLQUEUE_TARGET_BUSY) + rtn = SCSI_MLQUEUE_HOST_BUSY; + + SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, + "queuecommand : request rejected\n")); + } + + return rtn; + done: + cmd->scsi_done(cmd); + return 0; +} + +/** * scsi_done - Invoke completion on finished SCSI command. * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives * ownership back to SCSI Core -- i.e. the LLDD has finished with it. @@ -1725,7 +1809,7 @@ static void scsi_request_fn(struct request_queue *q) * we add the dev to the starved list so it eventually gets * a run when a tag is freed. */ - if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { + if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) { spin_lock_irq(shost->host_lock); if (list_empty(&sdev->starved_entry)) list_add_tail(&sdev->starved_entry, @@ -1739,6 +1823,11 @@ static void scsi_request_fn(struct request_queue *q) if (!scsi_host_queue_ready(q, shost, sdev)) goto host_not_ready; + + if (sdev->simple_tags) + cmd->flags |= SCMD_TAGGED; + else + cmd->flags &= ~SCMD_TAGGED; /* * Finally, initialize any error handling parameters, and set up @@ -1893,10 +1982,10 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req, blk_mq_start_request(req); } - if (blk_queue_tagged(q)) - req->cmd_flags |= REQ_QUEUED; + if (sdev->simple_tags) + cmd->flags |= SCMD_TAGGED; else - req->cmd_flags &= ~REQ_QUEUED; + cmd->flags &= ~SCMD_TAGGED; scsi_init_cmd_errh(cmd); cmd->scsi_done = scsi_mq_done; @@ -2091,7 +2180,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) memset(&shost->tag_set, 0, sizeof(shost->tag_set)); shost->tag_set.ops = &scsi_mq_ops; - shost->tag_set.nr_hw_queues = 1; + shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? 
: 1; shost->tag_set.queue_depth = shost->can_queue; shost->tag_set.cmd_size = cmd_size; shost->tag_set.numa_node = NUMA_NO_NODE; diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h index 1f65139e14f8..7fe64a847143 100644 --- a/drivers/scsi/scsi_logging.h +++ b/drivers/scsi/scsi_logging.h @@ -51,6 +51,7 @@ do { \ } while (0); \ } while (0) #else +#define SCSI_LOG_LEVEL(SHIFT, BITS) 0 #define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) #endif /* CONFIG_SCSI_LOGGING */ diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 12b8e1bee7f0..2dc4a83fb84c 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -29,7 +29,6 @@ extern int scsi_init_hosts(void); extern void scsi_exit_hosts(void); /* scsi.c */ -extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd); extern int scsi_setup_command_freelist(struct Scsi_Host *shost); extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); #ifdef CONFIG_SCSI_LOGGING @@ -84,7 +83,6 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd); extern int scsi_maybe_unblock_host(struct scsi_device *sdev); extern void scsi_device_unbusy(struct scsi_device *sdev); extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); -extern void scsi_next_command(struct scsi_cmnd *cmd); extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); extern void scsi_run_host_queues(struct Scsi_Host *shost); extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index ba3f1e8d0d57..983aed10ff2f 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -286,7 +286,13 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, } WARN_ON_ONCE(!blk_get_queue(sdev->request_queue)); sdev->request_queue->queuedata = sdev; - scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); + + if (!shost_use_blk_mq(sdev->host) && + (shost->bqt || shost->hostt->use_blk_tags)) { + blk_queue_init_tags(sdev->request_queue, + sdev->host->cmd_per_lun, shost->bqt); + } + scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun); scsi_sysfs_device_initialize(sdev); @@ -874,8 +880,10 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && - !(*bflags & BLIST_NOTQ)) + !(*bflags & BLIST_NOTQ)) { sdev->tagged_supported = 1; + sdev->simple_tags = 1; + } /* * Some devices (Texel CD ROM drives) have handshaking problems @@ -1214,9 +1222,9 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget, sparse_lun = 0; /* - * If less than SCSI_1_CSS, and no special lun scaning, stop + * If less than SCSI_1_CCS, and no special lun scanning, stop * scanning; this matches 2.4 behaviour, but could just be a bug - * (to continue scanning a SCSI_1_CSS device). + * (to continue scanning a SCSI_1_CCS device). * * This test is broken. 
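Taken together, the scan and tagging hunks above reduce per-driver TCQ setup to a template flag plus a queue-depth call, with SCMD_TAGGED replacing the old REQ_QUEUED test; a sketch of what a converted LLDD ends up with, where the mydrv_* names and MYDRV_QUEUE_DEPTH are placeholders:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#define MYDRV_QUEUE_DEPTH       32      /* placeholder value */

static int mydrv_slave_configure(struct scsi_device *sdev)
{
        /* block-layer tags are set up by the core when use_blk_tags is set */
        scsi_change_queue_depth(sdev, MYDRV_QUEUE_DEPTH);
        return 0;
}

static int mydrv_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        if (cmd->flags & SCMD_TAGGED)   /* replaces blk_rq_tagged()/REQ_QUEUED */
                mydrv_issue_tagged(shost, cmd, cmd->request->tag);  /* hypothetical */
        else
                mydrv_issue_untagged(shost, cmd);                   /* hypothetical */
        return 0;
}

static struct scsi_host_template mydrv_template = {
        .name                   = "mydrv",
        .queuecommand           = mydrv_queuecommand,
        .slave_configure        = mydrv_slave_configure,
        .change_queue_depth     = scsi_change_queue_depth,
        .use_blk_tags           = 1,
        .track_queue_depth      = 1,
};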
We might not have any device on lun0 for * a sparselun device, and if that's the case then how would we @@ -1585,16 +1593,15 @@ EXPORT_SYMBOL(scsi_add_device); void scsi_rescan_device(struct device *dev) { - struct scsi_driver *drv; - if (!dev->driver) return; - drv = to_scsi_driver(dev->driver); - if (try_module_get(drv->owner)) { + if (try_module_get(dev->driver->owner)) { + struct scsi_driver *drv = to_scsi_driver(dev->driver); + if (drv->rescan) drv->rescan(dev); - module_put(drv->owner); + module_put(dev->driver->owner); } } EXPORT_SYMBOL(scsi_rescan_device); @@ -1727,7 +1734,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || - ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) + ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun))) return -EINVAL; mutex_lock(&shost->scan_mutex); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index f4cb7b3e9e23..1cb64a8e18c9 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -727,9 +727,7 @@ show_queue_type_field(struct device *dev, struct device_attribute *attr, struct scsi_device *sdev = to_scsi_device(dev); const char *name = "none"; - if (sdev->ordered_tags) - name = "ordered"; - else if (sdev->simple_tags) + if (sdev->simple_tags) name = "simple"; return snprintf(buf, 20, "%s\n", name); @@ -747,9 +745,12 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr, if (!sdev->tagged_supported || !sht->change_queue_type) return -EINVAL; - if (strncmp(buf, "ordered", 7) == 0) - tag_type = MSG_ORDERED_TAG; - else if (strncmp(buf, "simple", 6) == 0) + /* + * We're never issueing order tags these days, but allow the value + * for backwards compatibility. 
+ */ + if (strncmp(buf, "ordered", 7) == 0 || + strncmp(buf, "simple", 6) == 0) tag_type = MSG_SIMPLE_TAG; else if (strncmp(buf, "none", 4) != 0) return -EINVAL; @@ -876,11 +877,10 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, depth = simple_strtoul(buf, NULL, 0); - if (depth < 1) + if (depth < 1 || depth > sht->can_queue) return -EINVAL; - retval = sht->change_queue_depth(sdev, depth, - SCSI_QDEPTH_DEFAULT); + retval = sht->change_queue_depth(sdev, depth); if (retval < 0) return retval; diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 503594e5f76d..82af28b90294 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -278,7 +278,7 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) return scsi_trace_rw16(p, cdb, len); case UNMAP: return scsi_trace_unmap(p, cdb, len); - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: return scsi_trace_service_action_in(p, cdb, len); case VARIABLE_LENGTH_CMD: return scsi_trace_varlen(p, cdb, len); diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index cf08071a9b6e..fa2aece76cc2 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -32,6 +32,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> @@ -1207,6 +1208,28 @@ int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, } EXPORT_SYMBOL_GPL(spi_populate_ppr_msg); +/** + * spi_populate_tag_msg - place a tag message in a buffer + * @msg: pointer to the area to place the tag + * @cmd: pointer to the scsi command for the tag + * + * Notes: + * designed to create the correct type of tag message for the + * particular request. Returns the size of the tag message. + * May return 0 if TCQ is disabled for this device. 
+ **/ +int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) +{ + if (cmd->flags & SCMD_TAGGED) { + *msg++ = MSG_SIMPLE_TAG; + *msg++ = cmd->request->tag; + return 2; + } + + return 0; +} +EXPORT_SYMBOL_GPL(spi_populate_tag_msg); + #ifdef CONFIG_SCSI_CONSTANTS static const char * const one_byte_msgs[] = { /* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers", diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index 92d24d6dcb39..910f4a7a3924 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -163,8 +163,8 @@ int scsi_partsize(unsigned char *buf, unsigned long capacity, end_head * end_sector + end_sector; /* This is the actual _sector_ number at the end */ - logical_end = get_unaligned(&largest->start_sect) - + get_unaligned(&largest->nr_sects); + logical_end = get_unaligned_le32(&largest->start_sect) + + get_unaligned_le32(&largest->nr_sects); /* This is for >1023 cylinders */ ext_cyl = (logical_end - (end_head * end_sector + end_sector)) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index cfba74cd8e8b..fedab3c21ddf 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -116,7 +116,7 @@ static int sd_eh_action(struct scsi_cmnd *, int); static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); -static void sd_print_result(struct scsi_disk *, int); +static void sd_print_result(const struct scsi_disk *, const char *, int); static DEFINE_SPINLOCK(sd_index_lock); static DEFINE_IDA(sd_index_ida); @@ -510,9 +510,9 @@ static const struct dev_pm_ops sd_pm_ops = { }; static struct scsi_driver sd_template = { - .owner = THIS_MODULE, .gendrv = { .name = "sd", + .owner = THIS_MODULE, .probe = sd_probe, .remove = sd_remove, .shutdown = sd_shutdown, @@ -656,7 +656,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) unsigned int logical_block_size = sdkp->device->sector_size; unsigned int max_blocks = 0; - q->limits.discard_zeroes_data = sdkp->lbprz; + q->limits.discard_zeroes_data = 0; q->limits.discard_alignment = sdkp->unmap_alignment * logical_block_size; q->limits.discard_granularity = @@ -680,11 +680,13 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) case SD_LBP_WS16: max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS16_BLOCKS); + q->limits.discard_zeroes_data = sdkp->lbprz; break; case SD_LBP_WS10: max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)SD_MAX_WS10_BLOCKS); + q->limits.discard_zeroes_data = sdkp->lbprz; break; case SD_LBP_ZERO: @@ -784,7 +786,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd) * amount of blocks described by the request. */ blk_add_request_payload(rq, page, len); - ret = scsi_init_io(cmd, GFP_ATOMIC); + ret = scsi_init_io(cmd); rq->__data_len = nr_bytes; out: @@ -878,7 +880,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) * knows how much to actually write. 
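A parallel-SCSI LLDD consumes the spi_populate_tag_msg() helper above while assembling its message-out bytes; a minimal sketch, where the buffer handling around the call is illustrative:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_spi.h>

static int mydrv_build_msgout(struct scsi_cmnd *cmd,
                              unsigned char *msg, int maxlen)
{
        if (maxlen < 2)
                return -1;

        /* appends MSG_SIMPLE_TAG plus the block-layer tag, or nothing
         * when the command is untagged; returns the number of bytes */
        return spi_populate_tag_msg(msg, cmd);
}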
*/ rq->__data_len = sdp->sector_size; - ret = scsi_init_io(cmd, GFP_ATOMIC); + ret = scsi_init_io(cmd); rq->__data_len = nr_bytes; return ret; } @@ -912,7 +914,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) int ret; unsigned char protect; - ret = scsi_init_io(SCpnt, GFP_ATOMIC); + ret = scsi_init_io(SCpnt); if (ret != BLKPREP_OK) goto out; SCpnt = rq->special; @@ -1334,9 +1336,9 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, * may try and take the device offline, in which case all further * access to the device is prohibited. */ - error = scsi_nonblockable_ioctl(sdp, cmd, p, - (mode & FMODE_NDELAY) != 0); - if (!scsi_block_when_processing_errors(sdp) || !error) + error = scsi_ioctl_block_when_processing_errors(sdp, cmd, + (mode & FMODE_NDELAY) != 0); + if (error) goto out; /* @@ -1492,7 +1494,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp) } if (res) { - sd_print_result(sdkp, res); + sd_print_result(sdkp, "Synchronize Cache(10) failed", res); if (driver_byte(res) & DRIVER_SENSE) sd_print_sense_hdr(sdkp, &sshdr); @@ -1541,31 +1543,19 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; - int ret; - - ret = scsi_verify_blk_ioctl(bdev, cmd); - if (ret < 0) - return ret; + int error; - /* - * If we are in the middle of error recovery, don't let anyone - * else try and use this device. Also, if error recovery fails, it - * may try and take the device offline, in which case all further - * access to the device is prohibited. - */ - if (!scsi_block_when_processing_errors(sdev)) - return -ENODEV; + error = scsi_ioctl_block_when_processing_errors(sdev, cmd, + (mode & FMODE_NDELAY) != 0); + if (error) + return error; - if (sdev->host->hostt->compat_ioctl) { - ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); - - return ret; - } - /* * Let the static ioctl translation table take care of it. */ - return -ENOIOCTLCMD; + if (!sdev->host->hostt->compat_ioctl) + return -ENOIOCTLCMD; + return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); } #endif @@ -1713,17 +1703,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) if (sense_valid) sense_deferred = scsi_sense_is_deferred(&sshdr); } -#ifdef CONFIG_SCSI_LOGGING - SCSI_LOG_HLCOMPLETE(1, scsi_print_result(SCpnt)); - if (sense_valid) { - SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, - "sd_done: sb[respc,sk,asc," - "ascq]=%x,%x,%x,%x\n", - sshdr.response_code, - sshdr.sense_key, sshdr.asc, - sshdr.ascq)); - } -#endif sdkp->medium_access_timed_out = 0; if (driver_byte(result) != DRIVER_SENSE && @@ -1743,7 +1722,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) * unknown amount of data was transferred so treat it as an * error. 
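sd_ioctl() and sd_compat_ioctl() above now gate on the new helper instead of open-coding the recovery checks; the same pattern for any SCSI upper-level driver ioctl entry point looks roughly like this, with mydrv_to_sdev() standing in for however the driver locates its scsi_device:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_ioctl.h>

static int mydrv_ioctl(struct block_device *bdev, fmode_t mode,
                       unsigned int cmd, unsigned long arg)
{
        struct scsi_device *sdev = mydrv_to_sdev(bdev); /* hypothetical */
        int error;

        /* lets SG_SCSI_RESET through on O_NDELAY opens, otherwise refuses
         * or waits while error recovery is in progress */
        error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
                        (mode & FMODE_NDELAY) != 0);
        if (error)
                return error;

        return scsi_ioctl(sdev, cmd, (void __user *)arg);
}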
*/ - scsi_print_sense("sd", SCpnt); SCpnt->result = 0; memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); break; @@ -1779,6 +1757,10 @@ static int sd_done(struct scsi_cmnd *SCpnt) break; } out: + SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt, + "sd_done: completed %d of %d bytes\n", + good_bytes, scsi_bufflen(SCpnt))); + if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) sd_dif_complete(SCpnt, good_bytes); @@ -1834,12 +1816,12 @@ sd_spinup_disk(struct scsi_disk *sdkp) /* no sense, TUR either succeeded or failed * with a status error */ if(!spintime && !scsi_status_is_good(the_result)) { - sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n"); - sd_print_result(sdkp, the_result); + sd_print_result(sdkp, "Test Unit Ready failed", + the_result); } break; } - + /* * The device does not want the automatic start to be issued. */ @@ -1955,7 +1937,6 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, struct scsi_sense_hdr *sshdr, int sense_valid, int the_result) { - sd_print_result(sdkp, the_result); if (driver_byte(the_result) & DRIVER_SENSE) sd_print_sense_hdr(sdkp, sshdr); else @@ -2001,7 +1982,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, do { memset(cmd, 0, 16); - cmd[0] = SERVICE_ACTION_IN; + cmd[0] = SERVICE_ACTION_IN_16; cmd[1] = SAI_READ_CAPACITY_16; cmd[13] = RC16_LEN; memset(buffer, 0, RC16_LEN); @@ -2036,7 +2017,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, } while (the_result && retries); if (the_result) { - sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY(16) failed\n"); + sd_print_result(sdkp, "Read Capacity(16) failed", the_result); read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); return -EINVAL; } @@ -2118,7 +2099,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, } while (the_result && retries); if (the_result) { - sd_printk(KERN_NOTICE, sdkp, "READ CAPACITY failed\n"); + sd_print_result(sdkp, "Read Capacity(10) failed", the_result); read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result); return -EINVAL; } @@ -2643,12 +2624,12 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) } else { /* LBP VPD page tells us what to use */ - if (sdkp->lbpu && sdkp->max_unmap_blocks) - sd_config_discard(sdkp, SD_LBP_UNMAP); - else if (sdkp->lbpws) + if (sdkp->lbpws) sd_config_discard(sdkp, SD_LBP_WS16); else if (sdkp->lbpws10) sd_config_discard(sdkp, SD_LBP_WS10); + else if (sdkp->lbpu && sdkp->max_unmap_blocks) + sd_config_discard(sdkp, SD_LBP_UNMAP); else sd_config_discard(sdkp, SD_LBP_DISABLE); } @@ -3142,8 +3123,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); if (res) { - sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); - sd_print_result(sdkp, res); + sd_print_result(sdkp, "Start/Stop Unit failed", res); if (driver_byte(res) & DRIVER_SENSE) sd_print_sense_hdr(sdkp, &sshdr); if (scsi_sense_valid(&sshdr) && @@ -3337,15 +3317,27 @@ module_exit(exit_sd); static void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { - sd_printk(KERN_INFO, sdkp, " "); - scsi_show_sense_hdr(sshdr); - sd_printk(KERN_INFO, sdkp, " "); - scsi_show_extd_sense(sshdr->asc, sshdr->ascq); + scsi_show_sense_hdr(sdkp->device, + sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); + scsi_show_extd_sense(sdkp->device, + sdkp->disk ? 
sdkp->disk->disk_name : NULL, + sshdr->asc, sshdr->ascq); } -static void sd_print_result(struct scsi_disk *sdkp, int result) +static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, + int result) { - sd_printk(KERN_INFO, sdkp, " "); - scsi_show_result(result); + const char *hb_string = scsi_hostbyte_string(result); + const char *db_string = scsi_driverbyte_string(result); + + if (hb_string || db_string) + sd_printk(KERN_INFO, sdkp, + "%s: Result: hostbyte=%s driverbyte=%s\n", msg, + hb_string ? hb_string : "invalid", + db_string ? db_string : "invalid"); + else + sd_printk(KERN_INFO, sdkp, + "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n", + msg, host_byte(result), driver_byte(result)); } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 467377884b63..63ba5ca7f9a1 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -103,9 +103,9 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk) #define sd_printk(prefix, sdsk, fmt, a...) \ (sdsk)->disk ? \ - sdev_printk(prefix, (sdsk)->device, "[%s] " fmt, \ - (sdsk)->disk->disk_name, ##a) : \ - sdev_printk(prefix, (sdsk)->device, fmt, ##a) + sdev_prefix_printk(prefix, (sdsk)->device, \ + (sdsk)->disk->disk_name, fmt, ##a) : \ + sdev_printk(prefix, (sdsk)->device, fmt, ##a) #define sd_first_printk(prefix, sdsk, fmt, a...) \ do { \ diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 80bfece1a2de..b7e79e7646ad 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -693,9 +693,9 @@ static struct class_interface ses_interface = { }; static struct scsi_driver ses_template = { - .owner = THIS_MODULE, .gendrv = { .name = "ses", + .owner = THIS_MODULE, .probe = ses_probe, .remove = ses_remove, }, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 60354449d9ed..b14f64cb9724 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -219,8 +219,8 @@ static void sg_device_destroy(struct kref *kref); #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) #define sg_printk(prefix, sdp, fmt, a...) \ - sdev_printk(prefix, (sdp)->device, "[%s] " fmt, \ - (sdp)->disk->disk_name, ##a) + sdev_prefix_printk(prefix, (sdp)->device, \ + (sdp)->disk->disk_name, fmt, ##a) static int sg_allow_access(struct file *filp, unsigned char *cmd) { @@ -1071,39 +1071,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) if (atomic_read(&sdp->detaching)) return -ENODEV; return put_user(sdp->device->host->hostt->emulated, ip); - case SG_SCSI_RESET: - if (atomic_read(&sdp->detaching)) - return -ENODEV; - if (filp->f_flags & O_NONBLOCK) { - if (scsi_host_in_recovery(sdp->device->host)) - return -EBUSY; - } else if (!scsi_block_when_processing_errors(sdp->device)) - return -EBUSY; - result = get_user(val, ip); - if (result) - return result; - if (SG_SCSI_RESET_NOTHING == val) - return 0; - switch (val) { - case SG_SCSI_RESET_DEVICE: - val = SCSI_TRY_RESET_DEVICE; - break; - case SG_SCSI_RESET_TARGET: - val = SCSI_TRY_RESET_TARGET; - break; - case SG_SCSI_RESET_BUS: - val = SCSI_TRY_RESET_BUS; - break; - case SG_SCSI_RESET_HOST: - val = SCSI_TRY_RESET_HOST; - break; - default: - return -EINVAL; - } - if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) - return -EACCES; - return (scsi_reset_provider(sdp->device, val) == - SUCCESS) ? 
0 : -EIO; case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; @@ -1123,13 +1090,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return result; sdp->sgdebug = (char) val; return 0; - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - case SCSI_IOCTL_PROBE_HOST: - case SG_GET_TRANSFORM: - if (atomic_read(&sdp->detaching)) - return -ENODEV; - return scsi_ioctl(sdp->device, cmd_in, p); case BLKSECTGET: return put_user(max_sectors_bytes(sdp->device->request_queue), ip); @@ -1145,11 +1105,25 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return blk_trace_startstop(sdp->device->request_queue, 0); case BLKTRACETEARDOWN: return blk_trace_remove(sdp->device->request_queue); + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + case SCSI_IOCTL_PROBE_HOST: + case SG_GET_TRANSFORM: + case SG_SCSI_RESET: + if (atomic_read(&sdp->detaching)) + return -ENODEV; + break; default: if (read_only) return -EPERM; /* don't know so take safe approach */ - return scsi_ioctl(sdp->device, cmd_in, p); + break; } + + result = scsi_ioctl_block_when_processing_errors(sdp->device, + cmd_in, filp->f_flags & O_NDELAY); + if (result) + return result; + return scsi_ioctl(sdp->device, cmd_in, p); } #ifdef CONFIG_COMPAT @@ -1360,7 +1334,7 @@ sg_rq_end_io(struct request *rq, int uptodate) if ((sdp->sgdebug > 0) && ((CHECK_CONDITION == srp->header.masked_status) || (COMMAND_TERMINATED == srp->header.masked_status))) - __scsi_print_sense(__func__, sense, + __scsi_print_sense(sdp->device, __func__, sense, SCSI_SENSE_BUFFERSIZE); /* Following if statement is a patch supplied by Eric Youngdale */ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 2de44cc58b1a..8bd54a64efd6 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -88,9 +88,9 @@ static struct dev_pm_ops sr_pm_ops = { }; static struct scsi_driver sr_template = { - .owner = THIS_MODULE, .gendrv = { .name = "sr", + .owner = THIS_MODULE, .probe = sr_probe, .remove = sr_remove, .pm = &sr_pm_ops, @@ -387,7 +387,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt) struct request *rq = SCpnt->request; int ret; - ret = scsi_init_io(SCpnt, GFP_ATOMIC); + ret = scsi_init_io(SCpnt); if (ret != BLKPREP_OK) goto out; SCpnt = rq->special; @@ -549,6 +549,11 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, mutex_lock(&sr_mutex); + ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, + (mode & FMODE_NDELAY) != 0); + if (ret) + goto out; + /* * Send SCSI addressing ioctls directly to mid level, send other * ioctls to cdrom/block level. @@ -564,16 +569,6 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, if (ret != -ENOSYS) goto out; - /* - * ENODEV means that we didn't recognise the ioctl, or that we - * cannot execute it in the current device state. In either - * case fall through to scsi_ioctl, which will return ENDOEV again - * if it doesn't recognise the ioctl - */ - ret = scsi_nonblockable_ioctl(sdev, cmd, argp, - (mode & FMODE_NDELAY) != 0); - if (ret != -ENODEV) - goto out; ret = scsi_ioctl(sdev, cmd, argp); out: diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 1d1f6f416c59..1de33719ad8e 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -57,8 +57,7 @@ typedef struct scsi_cd { } Scsi_CD; #define sr_printk(prefix, cd, fmt, a...) 
\ - sdev_printk(prefix, (cd)->device, "[%s] " fmt, \ - (cd)->cdi.name, ##a) + sdev_prefix_printk(prefix, (cd)->device, (cd)->cdi.name, fmt, ##a) int sr_do_ioctl(Scsi_CD *, struct packet_command *); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 6389fcff12ec..fb929fac22ba 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -246,7 +246,7 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) "CDROM not ready. Make sure there " "is a disc in the drive.\n"); #ifdef DEBUG - scsi_print_sense_hdr("sr", &sshdr); + scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); #endif err = -ENOMEDIUM; break; @@ -257,15 +257,15 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) /* sense: Invalid command operation code */ err = -EDRIVE_CANT_DO_THIS; #ifdef DEBUG - __scsi_print_command(cgc->cmd); - scsi_print_sense_hdr("sr", &sshdr); + __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE); + scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); #endif break; default: sr_printk(KERN_ERR, cd, "CDROM (ioctl) error, command: "); - __scsi_print_command(cgc->cmd); - scsi_print_sense_hdr("sr", &sshdr); + __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE); + scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); err = -EIO; } } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 4daa372ed381..128d3b55bdd9 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -56,7 +56,8 @@ static const char *verstr = "20101219"; /* The driver prints some debugging information on the console if DEBUG is defined and non-zero. */ -#define DEBUG 0 +#define DEBUG 1 +#define NO_DEBUG 0 #define ST_DEB_MSG KERN_NOTICE #if DEBUG @@ -80,6 +81,7 @@ static int max_sg_segs; static int try_direct_io = TRY_DIRECT_IO; static int try_rdio = 1; static int try_wdio = 1; +static int debug_flag; static struct class st_sysfs_class; static const struct attribute_group *st_dev_groups[]; @@ -100,6 +102,9 @@ module_param_named(max_sg_segs, max_sg_segs, int, 0); MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)"); module_param_named(try_direct_io, try_direct_io, int, 0); MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)"); +module_param_named(debug_flag, debug_flag, int, 0); +MODULE_PARM_DESC(debug_flag, "Enable DEBUG, same as setting debugging=1"); + /* Extra parameters for testing */ module_param_named(try_rdio, try_rdio, int, 0); @@ -124,6 +129,9 @@ static struct st_dev_parm { }, { "try_direct_io", &try_direct_io + }, + { + "debug_flag", &debug_flag } }; #endif @@ -194,9 +202,9 @@ static int do_create_sysfs_files(void); static void do_remove_sysfs_files(void); static struct scsi_driver st_template = { - .owner = THIS_MODULE, .gendrv = { .name = "st", + .owner = THIS_MODULE, .probe = st_probe, .remove = st_remove, }, @@ -306,8 +314,7 @@ static inline char *tape_name(struct scsi_tape *tape) } #define st_printk(prefix, t, fmt, a...) \ - sdev_printk(prefix, (t)->device, "%s: " fmt, \ - tape_name(t), ##a) + sdev_prefix_printk(prefix, (t)->device, tape_name(t), fmt, ##a) #ifdef DEBUG #define DEBC_printk(t, fmt, a...) 
\ if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); } @@ -374,7 +381,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); if (cmdstatp->have_sense) - __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); + __scsi_print_sense(STp->device, name, + SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } ) /* end DEB */ if (!debugging) { /* Abnormal conditions for tape */ if (!cmdstatp->have_sense) @@ -390,7 +398,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) SRpnt->cmd[0] != MODE_SENSE && SRpnt->cmd[0] != TEST_UNIT_READY) { - __scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE); + __scsi_print_sense(STp->device, name, + SRpnt->sense, SCSI_SENSE_BUFFERSIZE); } } @@ -852,17 +861,16 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm) /* Lock or unlock the drive door. Don't use when st_request allocated. */ static int do_door_lock(struct scsi_tape * STp, int do_lock) { - int retval, cmd; + int retval; - cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK; DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl"); - retval = scsi_ioctl(STp->device, cmd, NULL); - if (!retval) { + + retval = scsi_set_medium_removal(STp->device, + do_lock ? SCSI_REMOVAL_PREVENT : SCSI_REMOVAL_ALLOW); + if (!retval) STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; - } - else { + else STp->door_locked = ST_LOCK_FAILS; - } return retval; } @@ -3367,11 +3375,10 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) * may try and take the device offline, in which case all further * access to the device is prohibited. */ - retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p, - file->f_flags & O_NDELAY); - if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV) + retval = scsi_ioctl_block_when_processing_errors(STp->device, cmd_in, + file->f_flags & O_NDELAY); + if (retval) goto out; - retval = 0; cmd_type = _IOC_TYPE(cmd_in); cmd_nr = _IOC_NR(cmd_in); @@ -4309,6 +4316,12 @@ static int __init init_st(void) printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n", verstr, st_fixed_buffer_size, st_max_sg_segs); + debugging = (debug_flag > 0) ? 
debug_flag : NO_DEBUG; + if (debugging) { + printk(KERN_INFO "st: Debugging enabled debug_flag = %d\n", + debugging); + } + err = class_register(&st_sysfs_class); if (err) { pr_err("Unable register sysfs class for SCSI tapes\n"); diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 1aa4befcfbd0..98a62bc15069 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -544,33 +544,15 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) } static int -stex_slave_alloc(struct scsi_device *sdev) -{ - /* Cheat: usually extracted from Inquiry data */ - sdev->tagged_supported = 1; - - scsi_activate_tcq(sdev, sdev->host->can_queue); - - return 0; -} - -static int stex_slave_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); - sdev->tagged_supported = 1; return 0; } -static void -stex_slave_destroy(struct scsi_device *sdev) -{ - scsi_deactivate_tcq(sdev, 1); -} - static int stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { @@ -1162,9 +1144,7 @@ static int stex_abort(struct scsi_cmnd *cmd) int result = SUCCESS; unsigned long flags; - printk(KERN_INFO DRV_NAME - "(%s): aborting command\n", pci_name(hba->pdev)); - scsi_print_command(cmd); + scmd_printk(KERN_INFO, cmd, "aborting command\n"); base = hba->mmio_base; spin_lock_irqsave(host->host_lock, flags); @@ -1352,9 +1332,8 @@ static int stex_reset(struct scsi_cmnd *cmd) hba = (struct st_hba *) &cmd->device->host->hostdata[0]; - printk(KERN_INFO DRV_NAME - "(%s): resetting host\n", pci_name(hba->pdev)); - scsi_print_command(cmd); + shost_printk(KERN_INFO, cmd->device->host, + "resetting host\n"); return stex_do_reset(hba) ? FAILED : SUCCESS; } @@ -1391,12 +1370,11 @@ static struct scsi_host_template driver_template = { .proc_name = DRV_NAME, .bios_param = stex_biosparam, .queuecommand = stex_queuecommand, - .slave_alloc = stex_slave_alloc, .slave_configure = stex_slave_config, - .slave_destroy = stex_slave_destroy, .eh_abort_handler = stex_abort, .eh_host_reset_handler = stex_reset, .this_id = -1, + .use_blk_tags = 1, }; static struct pci_device_id stex_pci_tbl[] = { diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 733e5f759518..e3ba251fb6e7 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1097,7 +1097,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) if (scmnd->result) { if (scsi_normalize_sense(scmnd->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr)) - scsi_print_sense_hdr("storvsc", &sense_hdr); + scsi_print_sense_hdr(scmnd->device, "storvsc", + &sense_hdr); } if (vm_srb->srb_status != SRB_STATUS_SUCCESS) @@ -1428,8 +1429,7 @@ static void storvsc_device_destroy(struct scsi_device *sdevice) static int storvsc_device_configure(struct scsi_device *sdevice) { - scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG, - STORVSC_MAX_IO_REQUESTS); + scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS); blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE); diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c deleted file mode 100644 index 1a2367a1b1f2..000000000000 --- a/drivers/scsi/sun3_NCR5380.c +++ /dev/null @@ -1,2933 +0,0 @@ -/* sun3_NCR5380.c -- adapted from atari_NCR5380.c for the sun3 by - Sam Creasey. */ -/* - * NCR 5380 generic driver routines. These should make it *trivial* - * to implement 5380 SCSI drivers under Linux with a non-trantor - * architecture. 
- * - * Note that these routines also work with NR53c400 family chips. - * - * Copyright 1993, Drew Eckhardt - * Visionary Computing - * (Unix and Linux consulting and custom programming) - * drew@colorado.edu - * +1 (303) 666-5836 - * - * DISTRIBUTION RELEASE 6. - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 - */ - -/* - * ++roman: To port the 5380 driver to the Atari, I had to do some changes in - * this file, too: - * - * - Some of the debug statements were incorrect (undefined variables and the - * like). I fixed that. - * - * - In information_transfer(), I think a #ifdef was wrong. Looking at the - * possible DMA transfer size should also happen for REAL_DMA. I added this - * in the #if statement. - * - * - When using real DMA, information_transfer() should return in a DATAOUT - * phase after starting the DMA. It has nothing more to do. - * - * - The interrupt service routine should run main after end of DMA, too (not - * only after RESELECTION interrupts). Additionally, it should _not_ test - * for more interrupts after running main, since a DMA process may have - * been started and interrupts are turned on now. The new int could happen - * inside the execution of NCR5380_intr(), leading to recursive - * calls. - * - * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA - * and USLEEP, because these were messing up readability and will never be - * needed for Atari SCSI. - * - * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' - * stuff), and 'main' is executed in a bottom half if awoken by an - * interrupt. - * - * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." - * constructs. In my eyes, this made the source rather unreadable, so I - * finally replaced that by the *_PRINTK() macros. - * - */ -#include <scsi/scsi_dbg.h> -#include <scsi/scsi_transport_spi.h> - -/* - * Further development / testing that should be done : - * 1. Test linked command handling code after Eric is ready with - * the high level code. - */ - -#if (NDEBUG & NDEBUG_LISTS) -#define LIST(x,y) \ - { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \ - if ((x)==(y)) udelay(5); } -#define REMOVE(w,x,y,z) \ - { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \ - (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \ - if ((x)==(y)) udelay(5); } -#else -#define LIST(x,y) -#define REMOVE(w,x,y,z) -#endif - -#ifndef notyet -#undef LINKED -#endif - -/* - * Design - * Issues : - * - * The other Linux SCSI drivers were written when Linux was Intel PC-only, - * and specifically for each board rather than each chip. This makes their - * adaptation to platforms like the Mac (Some of which use NCR5380's) - * more difficult than it has to be. - * - * Also, many of the SCSI drivers were written before the command queuing - * routines were implemented, meaning their implementations of queued - * commands were hacked on rather than designed in from the start. - * - * When I designed the Linux SCSI drivers I figured that - * while having two different SCSI boards in a system might be useful - * for debugging things, two of the same type wouldn't be used. - * Well, I was wrong and a number of users have mailed me about running - * multiple high-performance SCSI boards in a server. 
- * - * Finally, when I get questions from users, I have no idea what - * revision of my driver they are running. - * - * This driver attempts to address these problems : - * This is a generic 5380 driver. To use it on a different platform, - * one simply writes appropriate system specific macros (ie, data - * transfer - some PC's will use the I/O bus, 68K's must use - * memory mapped) and drops this file in their 'C' wrapper. - * - * As far as command queueing, two queues are maintained for - * each 5380 in the system - commands that haven't been issued yet, - * and commands that are currently executing. This means that an - * unlimited number of commands may be queued, letting - * more commands propagate from the higher driver levels giving higher - * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, - * allowing multiple commands to propagate all the way to a SCSI-II device - * while a command is already executing. - * - * To solve the multiple-boards-in-the-same-system problem, - * there is a separate instance structure for each instance - * of a 5380 in the system. So, multiple NCR5380 drivers will - * be able to coexist with appropriate changes to the high level - * SCSI code. - * - * A NCR5380_PUBLIC_REVISION macro is provided, with the release - * number (updated for each public release) printed by the - * NCR5380_print_options command, which should be called from the - * wrapper detect function, so that I know what release of the driver - * users are using. - * - * Issues specific to the NCR5380 : - * - * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead - * piece of hardware that requires you to sit in a loop polling for - * the REQ signal as long as you are connected. Some devices are - * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect - * while doing long seek operations. - * - * The workaround for this is to keep track of devices that have - * disconnected. If the device hasn't disconnected, for commands that - * should disconnect, we do something like - * - * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } - * - * Some tweaking of N and M needs to be done. An algorithm based - * on "time to data" would give the best results as long as short time - * to datas (ie, on the same track) were considered, however these - * broken devices are the exception rather than the rule and I'd rather - * spend my time optimizing for the normal case. - * - * Architecture : - * - * At the heart of the design is a coroutine, NCR5380_main, - * which is started when not running by the interrupt handler, - * timer, and queue command function. It attempts to establish - * I_T_L or I_T_L_Q nexuses by removing the commands from the - * issue queue and calling NCR5380_select() if a nexus - * is not established. - * - * Once a nexus is established, the NCR5380_information_transfer() - * phase goes through the various phases as instructed by the target. - * if the target goes into MSG IN and sends a DISCONNECT message, - * the command structure is placed into the per instance disconnected - * queue, and NCR5380_main tries to find more work. If USLEEP - * was defined, and the target is idle for too long, the system - * will try to sleep. - * - * If a command has disconnected, eventually an interrupt will trigger, - * calling NCR5380_intr() which will in turn call NCR5380_reselect - * to reestablish a nexus. This will run main if necessary. - * - * On command termination, the done function will be called as - * appropriate. 
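/*
 * Illustrative sketch only (not part of this patch): one possible shape of
 * the "sleep for N usecs; poll for M usecs" REQ workaround described in the
 * notes above.  NCR5380_read(), STATUS_REG and SR_REQ come from this driver;
 * REQ_POLL_USECS, REQ_SLEEP_USECS and the timeout are made-up tuning values.
 */
#include <linux/delay.h>

#define REQ_POLL_USECS	20	/* "M": busy-poll window */
#define REQ_SLEEP_USECS	100	/* "N": back-off between poll bursts */

static int ncr5380_wait_for_req(unsigned long timeout_usecs)
{
	unsigned long waited = 0;

	while (waited < timeout_usecs) {
		unsigned long polled;

		/* Poll REQ for a short burst. */
		for (polled = 0; polled < REQ_POLL_USECS; polled++) {
			if (NCR5380_read(STATUS_REG) & SR_REQ)
				return 0;	/* REQ asserted, phase valid */
			udelay(1);
		}
		waited += REQ_POLL_USECS;

		/* Back off before the next burst (still a busy delay,
		 * since this would run in atomic context). */
		udelay(REQ_SLEEP_USECS);
		waited += REQ_SLEEP_USECS;
	}

	return -1;	/* timed out waiting for REQ */
}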
- * - * SCSI pointers are maintained in the SCp field of SCSI command - * structures, being initialized after the command is connected - * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. - * Note that in violation of the standard, an implicit SAVE POINTERS operation - * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. - */ - -/* - * Using this file : - * This file a skeleton Linux SCSI driver for the NCR 5380 series - * of chips. To use it, you write an architecture specific functions - * and macros and include this file in your driver. - * - * These macros control options : - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. - * - * LINKED - if defined, linked commands are supported. - * - * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. - * - * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible - * - * These macros MUST be defined : - * - * NCR5380_read(register) - read from the specified register - * - * NCR5380_write(register, value) - write to the specific register - * - * Either real DMA *or* pseudo DMA may be implemented - * REAL functions : - * NCR5380_REAL_DMA should be defined if real DMA is to be used. - * Note that the DMA setup functions should return the number of bytes - * that they were able to program the controller for. - * - * Also note that generic i386/PC versions of these macros are - * available as NCR5380_i386_dma_write_setup, - * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual. - * - * NCR5380_dma_write_setup(instance, src, count) - initialize - * NCR5380_dma_read_setup(instance, dst, count) - initialize - * NCR5380_dma_residual(instance); - residual count - * - * PSEUDO functions : - * NCR5380_pwrite(instance, src, count) - * NCR5380_pread(instance, dst, count); - * - * If nothing specific to this implementation needs doing (ie, with external - * hardware), you must also define - * - * NCR5380_queue_command - * NCR5380_reset - * NCR5380_abort - * NCR5380_proc_info - * - * to be the global entry points into the specific driver, ie - * #define NCR5380_queue_command t128_queue_command. - * - * If this is not done, the routines will be defined as static functions - * with the NCR5380* names and the user must provide a globally - * accessible wrapper function. - * - * The generic driver is initialized by calling NCR5380_init(instance), - * after setting the appropriate host specific fields and ID. If the - * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance, - * possible) function may be used. Before the specific driver initialization - * code finishes, NCR5380_print_options should be called. - */ - -static struct Scsi_Host *first_instance = NULL; -static struct scsi_host_template *the_template = NULL; - -/* Macros ease life... 
:-) */ -#define SETUP_HOSTDATA(in) \ - struct NCR5380_hostdata *hostdata = \ - (struct NCR5380_hostdata *)(in)->hostdata -#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata) - -#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble) -#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next)) -#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble)) - -#define HOSTNO instance->host_no -#define H_NO(cmd) (cmd)->device->host->host_no - -#define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer))))) - -#ifdef SUPPORT_TAGS - -/* - * Functions for handling tagged queuing - * ===================================== - * - * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes: - * - * Using consecutive numbers for the tags is no good idea in my eyes. There - * could be wrong re-usings if the counter (8 bit!) wraps and some early - * command has been preempted for a long time. My solution: a bitfield for - * remembering used tags. - * - * There's also the problem that each target has a certain queue size, but we - * cannot know it in advance :-( We just see a QUEUE_FULL status being - * returned. So, in this case, the driver internal queue size assumption is - * reduced to the number of active tags if QUEUE_FULL is returned by the - * target. The command is returned to the mid-level, but with status changed - * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL - * correctly. - * - * We're also not allowed running tagged commands as long as an untagged - * command is active. And REQUEST SENSE commands after a contingent allegiance - * condition _must_ be untagged. To keep track whether an untagged command has - * been issued, the host->busy array is still employed, as it is without - * support for tagged queuing. - * - * One could suspect that there are possible race conditions between - * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the - * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(), - * which already guaranteed to be running at most once. It is also the only - * place where tags/LUNs are allocated. So no other allocation can slip - * between that pair, there could only happen a reselection, which can free a - * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes - * important: the tag bit must be cleared before 'nr_allocated' is decreased. - */ - -/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */ -#undef TAG_NONE -#define TAG_NONE 0xff - -/* For the m68k, the number of bits in 'allocated' must be a multiple of 32! */ -#if (MAX_TAGS % 32) != 0 -#error "MAX_TAGS must be a multiple of 32!" -#endif - -typedef struct { - char allocated[MAX_TAGS/8]; - int nr_allocated; - int queue_size; -} TAG_ALLOC; - -static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */ - - -static void __init init_tags( void ) -{ - int target, lun; - TAG_ALLOC *ta; - - if (!setup_use_tagged_queuing) - return; - - for( target = 0; target < 8; ++target ) { - for( lun = 0; lun < 8; ++lun ) { - ta = &TagAlloc[target][lun]; - memset( &ta->allocated, 0, MAX_TAGS/8 ); - ta->nr_allocated = 0; - /* At the beginning, assume the maximum queue size we could - * support (MAX_TAGS). This value will be decreased if the target - * returns QUEUE_FULL status. - */ - ta->queue_size = MAX_TAGS; - } - } -} - - -/* Check if we can issue a command to this LUN: First see if the LUN is marked - * busy by an untagged command. 
If the command should use tagged queuing, also - * check that there is a free tag and the target's queue won't overflow. This - * function should be called with interrupts disabled to avoid race - * conditions. - */ - -static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) -{ - u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); - - if (hostdata->busy[cmd->device->id] & (1 << lun)) - return( 1 ); - if (!should_be_tagged || - !setup_use_tagged_queuing || !cmd->device->tagged_supported) - return( 0 ); - if (TagAlloc[cmd->device->id][lun].nr_allocated >= - TagAlloc[cmd->device->id][lun].queue_size ) { - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", - H_NO(cmd), cmd->device->id, lun ); - return( 1 ); - } - return( 0 ); -} - - -/* Allocate a tag for a command (there are no checks anymore, check_lun_busy() - * must be called before!), or reserve the LUN in 'busy' if the command is - * untagged. - */ - -static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) -{ - u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); - - /* If we or the target don't support tagged queuing, allocate the LUN for - * an untagged command. - */ - if (!should_be_tagged || - !setup_use_tagged_queuing || !cmd->device->tagged_supported) { - cmd->tag = TAG_NONE; - hostdata->busy[cmd->device->id] |= (1 << lun); - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " - "command\n", H_NO(cmd), cmd->device->id, lun ); - } - else { - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; - - cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); - set_bit( cmd->tag, &ta->allocated ); - ta->nr_allocated++; - dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " - "(now %d tags in use)\n", - H_NO(cmd), cmd->tag, cmd->device->id, lun, - ta->nr_allocated ); - } -} - - -/* Mark the tag of command 'cmd' as free, or in case of an untagged command, - * unlock the LUN. - */ - -static void cmd_free_tag(struct scsi_cmnd *cmd) -{ - u8 lun = cmd->device->lun; - SETUP_HOSTDATA(cmd->device->host); - - if (cmd->tag == TAG_NONE) { - hostdata->busy[cmd->device->id] &= ~(1 << lun); - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", - H_NO(cmd), cmd->device->id, lun ); - } - else if (cmd->tag >= MAX_TAGS) { - printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", - H_NO(cmd), cmd->tag ); - } - else { - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun]; - clear_bit( cmd->tag, &ta->allocated ); - ta->nr_allocated--; - dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", - H_NO(cmd), cmd->tag, cmd->device->id, lun ); - } -} - - -static void free_all_tags( void ) -{ - int target, lun; - TAG_ALLOC *ta; - - if (!setup_use_tagged_queuing) - return; - - for( target = 0; target < 8; ++target ) { - for( lun = 0; lun < 8; ++lun ) { - ta = &TagAlloc[target][lun]; - memset( &ta->allocated, 0, MAX_TAGS/8 ); - ta->nr_allocated = 0; - } - } -} - -#endif /* SUPPORT_TAGS */ - - -/* - * Function : void initialize_SCp(struct scsi_cmnd *cmd) - * - * Purpose : initialize the saved data pointers for cmd to point to the - * start of the buffer. - * - * Inputs : cmd - struct scsi_cmnd structure to have pointers reset. - */ - -static __inline__ void initialize_SCp(struct scsi_cmnd *cmd) -{ - /* - * Initialize the Scsi Pointer field so that all of the commands in the - * various queues are valid. 
- */ - - if (scsi_bufflen(cmd)) { - cmd->SCp.buffer = scsi_sglist(cmd); - cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; - cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer); - cmd->SCp.this_residual = cmd->SCp.buffer->length; - } else { - cmd->SCp.buffer = NULL; - cmd->SCp.buffers_residual = 0; - cmd->SCp.ptr = NULL; - cmd->SCp.this_residual = 0; - } - -} - -#include <linux/delay.h> - -#if NDEBUG -static struct { - unsigned char mask; - const char * name;} -signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" }, - { SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" }, - { SR_SEL, "SEL" }, {0, NULL}}, -basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}}, -icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"}, - {ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"}, - {ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"}, - {0, NULL}}, -mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"}, - {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR, - "MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"}, - {MR_MONITOR_BSY, "MODE MONITOR BSY"}, - {MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"}, - {0, NULL}}; - -/* - * Function : void NCR5380_print(struct Scsi_Host *instance) - * - * Purpose : print the SCSI bus signals for debugging purposes - * - * Input : instance - which NCR5380 - */ - -static void NCR5380_print(struct Scsi_Host *instance) { - unsigned char status, data, basr, mr, icr, i; - unsigned long flags; - - local_irq_save(flags); - data = NCR5380_read(CURRENT_SCSI_DATA_REG); - status = NCR5380_read(STATUS_REG); - mr = NCR5380_read(MODE_REG); - icr = NCR5380_read(INITIATOR_COMMAND_REG); - basr = NCR5380_read(BUS_AND_STATUS_REG); - local_irq_restore(flags); - printk("STATUS_REG: %02x ", status); - for (i = 0; signals[i].mask ; ++i) - if (status & signals[i].mask) - printk(",%s", signals[i].name); - printk("\nBASR: %02x ", basr); - for (i = 0; basrs[i].mask ; ++i) - if (basr & basrs[i].mask) - printk(",%s", basrs[i].name); - printk("\nICR: %02x ", icr); - for (i = 0; icrs[i].mask; ++i) - if (icr & icrs[i].mask) - printk(",%s", icrs[i].name); - printk("\nMODE: %02x ", mr); - for (i = 0; mrs[i].mask; ++i) - if (mr & mrs[i].mask) - printk(",%s", mrs[i].name); - printk("\n"); -} - -static struct { - unsigned char value; - const char *name; -} phases[] = { - {PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"}, - {PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"}, - {PHASE_UNKNOWN, "UNKNOWN"}}; - -/* - * Function : void NCR5380_print_phase(struct Scsi_Host *instance) - * - * Purpose : print the current SCSI phase for debugging purposes - * - * Input : instance - which NCR5380 - */ - -static void NCR5380_print_phase(struct Scsi_Host *instance) -{ - unsigned char status; - int i; - - status = NCR5380_read(STATUS_REG); - if (!(status & SR_REQ)) - printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO); - else { - for (i = 0; (phases[i].value != PHASE_UNKNOWN) && - (phases[i].value != (status & PHASE_MASK)); ++i); - printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name); - } -} - -#endif - -/* - * ++roman: New scheme of calling NCR5380_main() - * - * If we're not in an interrupt, we can call our main directly, it cannot be - * already running. Else, we queue it on a task queue, if not 'main_running' - * tells us that a lower level is already executing it. 
This way, - * 'main_running' needs not be protected in a special way. - * - * queue_main() is a utility function for putting our main onto the task - * queue, if main_running is false. It should be called only from a - * interrupt or bottom half. - */ - -#include <linux/gfp.h> -#include <linux/workqueue.h> -#include <linux/interrupt.h> - -static volatile int main_running = 0; -static DECLARE_WORK(NCR5380_tqueue, NCR5380_main); - -static __inline__ void queue_main(void) -{ - if (!main_running) { - /* If in interrupt and NCR5380_main() not already running, - queue it on the 'immediate' task queue, to be processed - immediately after the current interrupt processing has - finished. */ - schedule_work(&NCR5380_tqueue); - } - /* else: nothing to do: the running NCR5380_main() will pick up - any newly queued command. */ -} - - -static inline void NCR5380_all_init (void) -{ - static int done = 0; - if (!done) { - dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); - done = 1; - } -} - - -/* - * Function : void NCR58380_print_options (struct Scsi_Host *instance) - * - * Purpose : called by probe code indicating the NCR5380 driver - * options that were selected. - * - * Inputs : instance, pointer to this instance. Unused. - */ - -static void __init NCR5380_print_options (struct Scsi_Host *instance) -{ - printk(" generic options" -#ifdef AUTOSENSE - " AUTOSENSE" -#endif -#ifdef REAL_DMA - " REAL DMA" -#endif -#ifdef PARITY - " PARITY" -#endif -#ifdef SUPPORT_TAGS - " SCSI-2 TAGGED QUEUING" -#endif - ); - printk(" generic release=%d", NCR5380_PUBLIC_RELEASE); -} - -/* - * Function : void NCR5380_print_status (struct Scsi_Host *instance) - * - * Purpose : print commands in the various queues, called from - * NCR5380_abort and NCR5380_debug to aid debugging. - * - * Inputs : instance, pointer to this instance. - */ - -static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) -{ - int i, s; - unsigned char *command; - printk("scsi%d: destination target %d, lun %llu\n", - H_NO(cmd), cmd->device->id, cmd->device->lun); - printk(KERN_CONT " command = "); - command = cmd->cmnd; - printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - printk(KERN_CONT " %02x", command[i]); - printk("\n"); -} - -static void NCR5380_print_status(struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata; - Scsi_Cmnd *ptr; - unsigned long flags; - - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_dprint_phase(NDEBUG_ANY, instance); - - hostdata = (struct NCR5380_hostdata *)instance->hostdata; - - printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); - local_irq_save(flags); - printk("NCR5380: coroutine is%s running.\n", - main_running ? 
"" : "n't"); - if (!hostdata->connected) - printk("scsi%d: no currently connected command\n", HOSTNO); - else - lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected); - printk("scsi%d: issue_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) - lprint_Scsi_Cmnd(ptr); - - printk("scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) - lprint_Scsi_Cmnd(ptr); - - local_irq_restore(flags); - printk("\n"); -} - -static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) -{ - int i, s; - unsigned char *command; - seq_printf(m, "scsi%d: destination target %d, lun %llu\n", - H_NO(cmd), cmd->device->id, cmd->device->lun); - seq_printf(m, " command = "); - command = cmd->cmnd; - seq_printf(m, "%2d (0x%02x)", command[0], command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - seq_printf(m, " %02x", command[i]); - seq_printf(m, "\n"); -} - -static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) -{ - struct NCR5380_hostdata *hostdata; - Scsi_Cmnd *ptr; - unsigned long flags; - - hostdata = (struct NCR5380_hostdata *)instance->hostdata; - - seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); - local_irq_save(flags); - seq_printf(m, "NCR5380: coroutine is%s running.\n", - main_running ? "" : "n't"); - if (!hostdata->connected) - seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); - else - show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); - seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) - show_Scsi_Cmnd(ptr, m); - - seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); - for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) - show_Scsi_Cmnd(ptr, m); - - local_irq_restore(flags); - return 0; -} - -/* - * Function : void NCR5380_init (struct Scsi_Host *instance) - * - * Purpose : initializes *instance and corresponding 5380 chip. - * - * Inputs : instance - instantiation of the 5380 driver. - * - * Notes : I assume that the host, hostno, and id bits have been - * set correctly. I don't care about the irq and other fields. 
- * - */ - -static int __init NCR5380_init(struct Scsi_Host *instance, int flags) -{ - int i; - SETUP_HOSTDATA(instance); - - NCR5380_all_init(); - - hostdata->aborted = 0; - hostdata->id_mask = 1 << instance->this_id; - hostdata->id_higher_mask = 0; - for (i = hostdata->id_mask; i <= 0x80; i <<= 1) - if (i > hostdata->id_mask) - hostdata->id_higher_mask |= i; - for (i = 0; i < 8; ++i) - hostdata->busy[i] = 0; -#ifdef SUPPORT_TAGS - init_tags(); -#endif -#if defined (REAL_DMA) - hostdata->dma_len = 0; -#endif - hostdata->targets_present = 0; - hostdata->connected = NULL; - hostdata->issue_queue = NULL; - hostdata->disconnected_queue = NULL; - hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; - - if (!the_template) { - the_template = instance->hostt; - first_instance = instance; - } - - -#ifndef AUTOSENSE - if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1)) - printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n" - " without AUTOSENSE option, contingent allegiance conditions may\n" - " be incorrectly cleared.\n", HOSTNO); -#endif /* def AUTOSENSE */ - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(TARGET_COMMAND_REG, 0); - NCR5380_write(SELECT_ENABLE_REG, 0); - - return 0; -} - -static void NCR5380_exit(struct Scsi_Host *instance) -{ - /* Empty, as we didn't schedule any delayed work */ -} - -/* - * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd, - * void (*done)(struct scsi_cmnd *)) - * - * Purpose : enqueues a SCSI command - * - * Inputs : cmd - SCSI command, done - function called on completion, with - * a pointer to the command descriptor. - * - * Returns : 0 - * - * Side effects : - * cmd is added to the per instance issue_queue, with minor - * twiddling done to the host specific fields of cmd. If the - * main coroutine is not running, it is restarted. - * - */ - -/* Only make static if a wrapper function is used */ -static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) -{ - SETUP_HOSTDATA(cmd->device->host); - struct scsi_cmnd *tmp; - unsigned long flags; - -#if (NDEBUG & NDEBUG_NO_WRITE) - switch (cmd->cmnd[0]) { - case WRITE_6: - case WRITE_10: - printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", - H_NO(cmd)); - cmd->result = (DID_ERROR << 16); - done(cmd); - return 0; - } -#endif /* (NDEBUG & NDEBUG_NO_WRITE) */ - - -#ifdef NCR5380_STATS -# if 0 - if (!hostdata->connected && !hostdata->issue_queue && - !hostdata->disconnected_queue) { - hostdata->timebase = jiffies; - } -# endif -# ifdef NCR5380_STAT_LIMIT - if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT) -# endif - switch (cmd->cmnd[0]) - { - case WRITE: - case WRITE_6: - case WRITE_10: - hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); - hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd); - hostdata->pendingw++; - break; - case READ: - case READ_6: - case READ_10: - hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); - hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd); - hostdata->pendingr++; - break; - } -#endif - - /* - * We use the host_scribble field as a pointer to the next command - * in a queue - */ - - SET_NEXT(cmd, NULL); - cmd->scsi_done = done; - - cmd->result = 0; - - - /* - * Insert the cmd into the issue queue. 
Note that REQUEST SENSE - * commands are added to the head of the queue since any command will - * clear the contingent allegiance condition that exists and the - * sense data is only guaranteed to be valid while the condition exists. - */ - - local_irq_save(flags); - /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. - * Otherwise a running NCR5380_main may steal the lock. - * Lock before actually inserting due to fairness reasons explained in - * atari_scsi.c. If we insert first, then it's impossible for this driver - * to release the lock. - * Stop timer for this command while waiting for the lock, or timeouts - * may happen (and they really do), and it's no good if the command doesn't - * appear in any of the queues. - * ++roman: Just disabling the NCR interrupt isn't sufficient here, - * because also a timer int can trigger an abort or reset, which would - * alter queues and touch the lock. - */ - if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { - LIST(cmd, hostdata->issue_queue); - SET_NEXT(cmd, hostdata->issue_queue); - hostdata->issue_queue = cmd; - } else { - for (tmp = (struct scsi_cmnd *)hostdata->issue_queue; - NEXT(tmp); tmp = NEXT(tmp)) - ; - LIST(cmd, tmp); - SET_NEXT(tmp, cmd); - } - - local_irq_restore(flags); - - dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), - (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); - - /* If queue_command() is called from an interrupt (real one or bottom - * half), we let queue_main() do the job of taking care about main. If it - * is already running, this is a no-op, else main will be queued. - * - * If we're not in an interrupt, we can call NCR5380_main() - * unconditionally, because it cannot be already running. - */ - if (in_interrupt() || ((flags >> 8) & 7) >= 6) - queue_main(); - else - NCR5380_main(NULL); - return 0; -} - -static DEF_SCSI_QCMD(NCR5380_queue_command) - -/* - * Function : NCR5380_main (void) - * - * Purpose : NCR5380_main is a coroutine that runs as long as more work can - * be done on the NCR5380 host adapters in a system. Both - * NCR5380_queue_command() and NCR5380_intr() will try to start it - * in case it is not running. - * - * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should - * reenable them. This prevents reentrancy and kernel stack overflow. - */ - -static void NCR5380_main (struct work_struct *bl) -{ - struct scsi_cmnd *tmp, *prev; - struct Scsi_Host *instance = first_instance; - struct NCR5380_hostdata *hostdata = HOSTDATA(instance); - int done; - unsigned long flags; - - /* - * We run (with interrupts disabled) until we're sure that none of - * the host adapters have anything that can be done, at which point - * we set main_running to 0 and exit. - * - * Interrupts are enabled before doing various other internal - * instructions, after we've decided that we need to run through - * the loop again. - * - * this should prevent any race conditions. - * - * ++roman: Just disabling the NCR interrupt isn't sufficient here, - * because also a timer int can trigger an abort or reset, which can - * alter queues and touch the Falcon lock. - */ - - /* Tell int handlers main() is now already executing. Note that - no races are possible here. If an int comes in before - 'main_running' is set here, and queues/executes main via the - task queue, it doesn't do any harm, just this instance of main - won't find any work left to do. 
*/ - if (main_running) - return; - main_running = 1; - - local_save_flags(flags); - do { - local_irq_disable(); /* Freeze request queues */ - done = 1; - - if (!hostdata->connected) { - dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO ); - /* - * Search through the issue_queue for a command destined - * for a target that's not busy. - */ -#if (NDEBUG & NDEBUG_LISTS) - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; - tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) - ; - if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/ -#endif - for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, - prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { - - if (prev != tmp) - dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); - /* When we find one, remove it from the issue queue. */ - /* ++guenther: possible race with Falcon locking */ - if ( -#ifdef SUPPORT_TAGS - !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) -#else - !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun)) -#endif - ) { - /* ++guenther: just to be sure, this must be atomic */ - local_irq_disable(); - if (prev) { - REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); - SET_NEXT(prev, NEXT(tmp)); - } else { - REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); - hostdata->issue_queue = NEXT(tmp); - } - SET_NEXT(tmp, NULL); - - /* reenable interrupts after finding one */ - local_irq_restore(flags); - - /* - * Attempt to establish an I_T_L nexus here. - * On success, instance->hostdata->connected is set. - * On failure, we must add the command back to the - * issue queue so we can keep trying. - */ - dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " - "lun %llu removed from issue_queue\n", - HOSTNO, tmp->device->id, tmp->device->lun); - /* - * REQUEST SENSE commands are issued without tagged - * queueing, even on SCSI-II devices because the - * contingent allegiance condition exists for the - * entire unit. - */ - /* ++roman: ...and the standard also requires that - * REQUEST SENSE command are untagged. - */ - -#ifdef SUPPORT_TAGS - cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE ); -#endif - if (!NCR5380_select(instance, tmp, - (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : - TAG_NEXT)) { - break; - } else { - local_irq_disable(); - LIST(tmp, hostdata->issue_queue); - SET_NEXT(tmp, hostdata->issue_queue); - hostdata->issue_queue = tmp; -#ifdef SUPPORT_TAGS - cmd_free_tag( tmp ); -#endif - local_irq_restore(flags); - dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " - "returned to issue_queue\n", HOSTNO); - if (hostdata->connected) - break; - } - } /* if target/lun/target queue is not busy */ - } /* for issue_queue */ - } /* if (!hostdata->connected) */ - if (hostdata->connected -#ifdef REAL_DMA - && !hostdata->dma_len -#endif - ) { - local_irq_restore(flags); - dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", - HOSTNO); - NCR5380_information_transfer(instance); - dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); - done = 0; - } - } while (!done); - - /* Better allow ints _after_ 'main_running' has been cleared, else - an interrupt could believe we'll pick up the work it left for - us, but we won't see it anymore here... 
*/ - main_running = 0; - local_irq_restore(flags); -} - - -#ifdef REAL_DMA -/* - * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) - * - * Purpose : Called by interrupt handler when DMA finishes or a phase - * mismatch occurs (which would finish the DMA transfer). - * - * Inputs : instance - this instance of the NCR5380. - * - */ - -static void NCR5380_dma_complete( struct Scsi_Host *instance ) -{ - SETUP_HOSTDATA(instance); - int transfered; - unsigned char **data; - volatile int *count; - - if (!hostdata->connected) { - printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " - "no connected cmd\n", HOSTNO); - return; - } - - dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", - HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), - NCR5380_read(STATUS_REG)); - - if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { - printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO); - printk("please e-mail sammy@sammy.net with a description of how this\n"); - printk("error was produced.\n"); - BUG(); - } - - /* make sure we're not stuck in a data phase */ - if((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | - BASR_ACK)) == - (BASR_PHASE_MATCH | BASR_ACK)) { - printk("scsi%d: BASR %02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG)); - printk("scsi%d: bus stuck in data phase -- probably a single byte " - "overrun!\n", HOSTNO); - printk("not prepared for this error!\n"); - printk("please e-mail sammy@sammy.net with a description of how this\n"); - printk("error was produced.\n"); - BUG(); - } - - - - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - transfered = hostdata->dma_len - NCR5380_dma_residual(instance); - hostdata->dma_len = 0; - - data = (unsigned char **) &(hostdata->connected->SCp.ptr); - count = &(hostdata->connected->SCp.this_residual); - *data += transfered; - *count -= transfered; - -} -#endif /* REAL_DMA */ - - -/* - * Function : void NCR5380_intr (int irq) - * - * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses - * from the disconnected queue, and restarting NCR5380_main() - * as required. - * - * Inputs : int irq, irq that caused this interrupt. 
- * - */ - -static irqreturn_t NCR5380_intr (int irq, void *dev_id) -{ - struct Scsi_Host *instance = first_instance; - int done = 1, handled = 0; - unsigned char basr; - - dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); - - /* Look for pending interrupts */ - basr = NCR5380_read(BUS_AND_STATUS_REG); - dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); - /* dispatch to appropriate routine if found and done=0 */ - if (basr & BASR_IRQ) { - NCR5380_dprint(NDEBUG_INTR, instance); - if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { - done = 0; -// ENABLE_IRQ(); - dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); - NCR5380_reselect(instance); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } - else if (basr & BASR_PARITY_ERROR) { - dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } - else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); - (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); - } - else { - /* - * The rest of the interrupt conditions can occur only during a - * DMA transfer - */ - -#if defined(REAL_DMA) - /* - * We should only get PHASE MISMATCH and EOP interrupts if we have - * DMA enabled, so do a sanity check based on the current setting - * of the MODE register. - */ - - if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) && - ((basr & BASR_END_DMA_TRANSFER) || - !(basr & BASR_PHASE_MATCH))) { - - dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); - NCR5380_dma_complete( instance ); - done = 0; -// ENABLE_IRQ(); - } else -#endif /* REAL_DMA */ - { -/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ - if (basr & BASR_PHASE_MATCH) - dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, " - "BASR 0x%x, MR 0x%x, SR 0x%x\n", - HOSTNO, basr, NCR5380_read(MODE_REG), - NCR5380_read(STATUS_REG)); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - } - } /* if !(SELECTION || PARITY) */ - handled = 1; - } /* BASR & IRQ */ - else { - - printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, " - "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, - NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); - (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - } - - if (!done) { - dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); - /* Put a call to NCR5380_main() on the queue... 
*/ - queue_main(); - } - return IRQ_RETVAL(handled); -} - -#ifdef NCR5380_STATS -static void collect_stats(struct NCR5380_hostdata *hostdata, - struct scsi_cmnd *cmd) -{ -# ifdef NCR5380_STAT_LIMIT - if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT) -# endif - switch (cmd->cmnd[0]) - { - case WRITE: - case WRITE_6: - case WRITE_10: - hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase); - /*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/ - hostdata->pendingw--; - break; - case READ: - case READ_6: - case READ_10: - hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase); - /*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/ - hostdata->pendingr--; - break; - } -} -#endif - -/* - * Function : int NCR5380_select(struct Scsi_Host *instance, - * struct scsi_cmnd *cmd, int tag); - * - * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, - * including ARBITRATION, SELECTION, and initial message out for - * IDENTIFY and queue messages. - * - * Inputs : instance - instantiation of the 5380 driver on which this - * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for - * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for - * the command that is presently connected. - * - * Returns : -1 if selection could not execute for some reason, - * 0 if selection succeeded or failed because the target - * did not respond. - * - * Side effects : - * If bus busy, arbitration failed, etc, NCR5380_select() will exit - * with registers as they should have been on entry - ie - * SELECT_ENABLE will be set appropriately, the NCR5380 - * will cease to drive any SCSI bus signals. - * - * If successful : I_T_L or I_T_L_Q nexus will be established, - * instance->connected will be set to cmd. - * SELECT interrupt will be disabled. - * - * If failed (no target) : cmd->scsi_done() will be called, and the - * cmd->result host byte set to DID_BAD_TARGET. - */ - -static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, - int tag) -{ - SETUP_HOSTDATA(instance); - unsigned char tmp[3], phase; - unsigned char *data; - int len; - unsigned long timeout; - unsigned long flags; - - hostdata->restart_select = 0; - NCR5380_dprint(NDEBUG_ARBITRATION, instance); - dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, - instance->this_id); - - /* - * Set the phase bits to 0, otherwise the NCR5380 won't drive the - * data bus during SELECTION. - */ - - local_irq_save(flags); - if (hostdata->connected) { - local_irq_restore(flags); - return -1; - } - NCR5380_write(TARGET_COMMAND_REG, 0); - - - /* - * Start arbitration. 
- */ - - NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); - NCR5380_write(MODE_REG, MR_ARBITRATE); - - local_irq_restore(flags); - - /* Wait for arbitration logic to complete */ -#ifdef NCR_TIMEOUT - { - unsigned long timeout = jiffies + 2*NCR_TIMEOUT; - - while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) - && time_before(jiffies, timeout) && !hostdata->connected) - ; - if (time_after_eq(jiffies, timeout)) - { - printk("scsi : arbitration timeout at %d\n", __LINE__); - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } - } -#else /* NCR_TIMEOUT */ - while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) - && !hostdata->connected); -#endif - - dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); - - if (hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - return -1; - } - /* - * The arbitration delay is 2.2us, but this is a minimum and there is - * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate - * the integral nature of udelay(). - * - */ - - udelay(3); - - /* Check for lost arbitration */ - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || - (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", - HOSTNO); - return -1; - } - - /* after/during arbitration, BSY should be asserted. - IBM DPES-31080 Version S31Q works now */ - /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL | - ICR_ASSERT_BSY ) ; - - if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || - hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", - HOSTNO); - return -1; - } - - /* - * Again, bus clear + bus settle time is 1.2us, however, this is - * a minimum so we'll udelay ceil(1.2) - */ - -#ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY - /* ++roman: But some targets (see above :-) seem to need a bit more... */ - udelay(15); -#else - udelay(2); -#endif - - if (hostdata->connected) { - NCR5380_write(MODE_REG, MR_BASE); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return -1; - } - - dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); - - /* - * Now that we have won arbitration, start Selection process, asserting - * the host and target ID's on the SCSI bus. - */ - - NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); - - /* - * Raise ATN while SEL is true before BSY goes false from arbitration, - * since this is the only way to guarantee that we'll get a MESSAGE OUT - * phase immediately after selection. - */ - - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); - NCR5380_write(MODE_REG, MR_BASE); - - /* - * Reselect interrupts must be turned off prior to the dropping of BSY, - * otherwise we will trigger an interrupt. - */ - - if (hostdata->connected) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - return -1; - } - - NCR5380_write(SELECT_ENABLE_REG, 0); - - /* - * The initiator shall then wait at least two deskew delays and release - * the BSY signal. 
- */ - udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ - - /* Reset BSY */ - NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | - ICR_ASSERT_ATN | ICR_ASSERT_SEL)); - - /* - * Something weird happens when we cease to drive BSY - looks - * like the board/chip is letting us do another read before the - * appropriate propagation delay has expired, and we're confusing - * a BSY signal from ourselves as the target's response to SELECTION. - * - * A small delay (the 'C++' frontend breaks the pipeline with an - * unnecessary jump, making it work on my 386-33/Trantor T128, the - * tighter 'C' code breaks and requires this) solves the problem - - * the 1 us delay is arbitrary, and only used because this delay will - * be the same on other platforms and since it works here, it should - * work there. - * - * wingel suggests that this could be due to failing to wait - * one deskew delay. - */ - - udelay(1); - - dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); - - /* - * The SCSI specification calls for a 250 ms timeout for the actual - * selection. - */ - - timeout = jiffies + 25; - - /* - * XXX very interesting - we're seeing a bounce where the BSY we - * asserted is being reflected / still asserted (propagation delay?) - * and it's detecting as true. Sigh. - */ - -#if 0 - /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert - * IO while SEL is true. But again, there are some disks out the in the - * world that do that nevertheless. (Somebody claimed that this announces - * reselection capability of the target.) So we better skip that test and - * only wait for BSY... (Famous german words: Der Klügere gibt nach :-) - */ - - while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & - (SR_BSY | SR_IO))); - - if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == - (SR_SEL | SR_IO)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - NCR5380_reselect(instance); - printk (KERN_ERR "scsi%d: reselection after won arbitration?\n", - HOSTNO); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } -#else - while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY)); -#endif - - /* - * No less than two deskew delays after the initiator detects the - * BSY signal is true, it shall release the SEL signal and may - * change the DATA BUS. 
-wingel - */ - - udelay(1); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - - if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - if (hostdata->targets_present & (1 << cmd->device->id)) { - printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); - if (hostdata->restart_select) - printk(KERN_NOTICE "\trestart select\n"); - NCR5380_dprint(NDEBUG_ANY, instance); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return -1; - } - cmd->result = DID_BAD_TARGET << 16; -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); -#endif - cmd->scsi_done(cmd); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return 0; - } - - hostdata->targets_present |= (1 << cmd->device->id); - - /* - * Since we followed the SCSI spec, and raised ATN while SEL - * was true but before BSY was false during selection, the information - * transfer phase should be a MESSAGE OUT phase so that we can send the - * IDENTIFY message. - * - * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG - * message (2 bytes) with a tag ID that we increment with every command - * until it wraps back to 0. - * - * XXX - it turns out that there are some broken SCSI-II devices, - * which claim to support tagged queuing but fail when more than - * some number of commands are issued at once. - */ - - /* Wait for start of REQ/ACK handshake */ - while (!(NCR5380_read(STATUS_REG) & SR_REQ)); - - dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", - HOSTNO, cmd->device->id); - tmp[0] = IDENTIFY(1, cmd->device->lun); - -#ifdef SUPPORT_TAGS - if (cmd->tag != TAG_NONE) { - tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; - tmp[2] = cmd->tag; - len = 3; - } else - len = 1; -#else - len = 1; - cmd->tag=0; -#endif /* SUPPORT_TAGS */ - - /* Send message(s) */ - data = tmp; - phase = PHASE_MSGOUT; - NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); - /* XXX need to handle errors here */ - hostdata->connected = cmd; -#ifndef SUPPORT_TAGS - hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); -#endif -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - initialize_SCp(cmd); - - - return 0; -} - -/* - * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using polled I/O - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes are transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. - * - * XXX Note : handling for bus free may be useful. - */ - -/* - * Note : this code is not as quick as it could be, however it - * IS 100% reliable, and for the actual data transfer where speed - * counts, we will always do a pseudo DMA or DMA transfer. 
- */ - -static int NCR5380_transfer_pio( struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data) -{ - register unsigned char p = *phase, tmp; - register int c = *count; - register unsigned char *d = *data; - - /* - * The NCR5380 chip will only drive the SCSI bus when the - * phase specified in the appropriate bits of the TARGET COMMAND - * REGISTER match the STATUS REGISTER - */ - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); - - do { - /* - * Wait for assertion of REQ, after which the phase bits will be - * valid - */ - while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); - - dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); - - /* Check for phase mismatch */ - if ((tmp & PHASE_MASK) != p) { - dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); - NCR5380_dprint_phase(NDEBUG_PIO, instance); - break; - } - - /* Do actual transfer from SCSI bus to / from memory */ - if (!(p & SR_IO)) - NCR5380_write(OUTPUT_DATA_REG, *d); - else - *d = NCR5380_read(CURRENT_SCSI_DATA_REG); - - ++d; - - /* - * The SCSI standard suggests that in MSGOUT phase, the initiator - * should drop ATN on the last byte of the message phase - * after REQ has been asserted for the handshake but before - * the initiator raises ACK. - */ - - if (!(p & SR_IO)) { - if (!((p & SR_MSG) && c > 1)) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA); - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ACK); - } else { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN); - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); - } - } else { - NCR5380_dprint(NDEBUG_PIO, instance); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); - } - - while (NCR5380_read(STATUS_REG) & SR_REQ); - - dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); - -/* - * We have several special cases to consider during REQ/ACK handshaking : - * 1. We were in MSGOUT phase, and we are on the last byte of the - * message. ATN must be dropped as ACK is dropped. - * - * 2. We are in a MSGIN phase, and we are on the last byte of the - * message. We must exit with ACK asserted, so that the calling - * code may raise ATN before dropping ACK to reject the message. - * - * 3. ACK and ATN are clear and the target may proceed as normal. - */ - if (!(p == PHASE_MSGIN && c == 1)) { - if (p == PHASE_MSGOUT && c > 1) - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - else - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - } - } while (--c); - - dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); - - *count = c; - *data = d; - tmp = NCR5380_read(STATUS_REG); - /* The phase read from the bus is valid if either REQ is (already) - * asserted or if ACK hasn't been released yet. The latter is the case if - * we're in MSGIN and all wanted bytes have been received. */ - if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0)) - *phase = tmp & PHASE_MASK; - else - *phase = PHASE_UNKNOWN; - - if (!c || (*phase == p)) - return 0; - else - return -1; -} - -/* - * Function : do_abort (Scsi_Host *host) - * - * Purpose : abort the currently established nexus. Should only be - * called from a routine which can drop into a - * - * Returns : 0 on success, -1 on failure. 
- */ - -static int do_abort (struct Scsi_Host *host) -{ - unsigned char tmp, *msgptr, phase; - int len; - - /* Request message out phase */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - - /* - * Wait for the target to indicate a valid phase by asserting - * REQ. Once this happens, we'll have either a MSGOUT phase - * and can immediately send the ABORT message, or we'll have some - * other phase and will have to source/sink data. - * - * We really don't care what value was on the bus or what value - * the target sees, so we just handshake. - */ - - while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); - - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - - if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | - ICR_ASSERT_ACK); - while (NCR5380_read(STATUS_REG) & SR_REQ); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); - } - - tmp = ABORT; - msgptr = &tmp; - len = 1; - phase = PHASE_MSGOUT; - NCR5380_transfer_pio (host, &phase, &len, &msgptr); - - /* - * If we got here, and the command completed successfully, - * we're about to go into bus free state. - */ - - return len ? -1 : 0; -} - -#if defined(REAL_DMA) -/* - * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, - * unsigned char *phase, int *count, unsigned char **data) - * - * Purpose : transfers data in given phase using either real - * or pseudo DMA. - * - * Inputs : instance - instance of driver, *phase - pointer to - * what phase is expected, *count - pointer to number of - * bytes to transfer, **data - pointer to data pointer. - * - * Returns : -1 when different phase is entered without transferring - * maximum number of bytes, 0 if all bytes or transferred or exit - * is in same phase. - * - * Also, *phase, *count, *data are modified in place. - * - */ - - -static int NCR5380_transfer_dma( struct Scsi_Host *instance, - unsigned char *phase, int *count, - unsigned char **data) -{ - SETUP_HOSTDATA(instance); - register int c = *count; - register unsigned char p = *phase; - unsigned long flags; - - /* sanity check */ - if(!sun3_dma_setup_done) { - printk("scsi%d: transfer_dma without setup!\n", HOSTNO); - BUG(); - } - hostdata->dma_len = c; - - dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", - HOSTNO, (p & SR_IO) ? "reading" : "writing", - c, (p & SR_IO) ? "to" : "from", *data); - - /* netbsd turns off ints here, why not be safe and do it too */ - local_irq_save(flags); - - /* send start chain */ - sun3scsi_dma_start(c, *data); - - if (p & SR_IO) { - NCR5380_write(TARGET_COMMAND_REG, 1); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(INITIATOR_COMMAND_REG, 0); - NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); - NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); - } else { - NCR5380_write(TARGET_COMMAND_REG, 0); - NCR5380_read(RESET_PARITY_INTERRUPT_REG); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA); - NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); - NCR5380_write(START_DMA_SEND_REG, 0); - } - -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - - local_irq_restore(flags); - - sun3_dma_active = 1; - return 0; -} -#endif /* defined(REAL_DMA) */ - -/* - * Function : NCR5380_information_transfer (struct Scsi_Host *instance) - * - * Purpose : run through the various SCSI phases and do as the target - * directs us to. 
Operates on the currently connected command, - * instance->connected. - * - * Inputs : instance, instance for which we are doing commands - * - * Side effects : SCSI things happen, the disconnected queue will be - * modified if a command disconnects, *instance->connected will - * change. - * - * XXX Note : we need to watch for bus free or a reset condition here - * to recover from an unexpected bus free condition. - */ - -static void NCR5380_information_transfer (struct Scsi_Host *instance) -{ - SETUP_HOSTDATA(instance); - unsigned long flags; - unsigned char msgout = NOP; - int sink = 0; - int len; -#if defined(REAL_DMA) - int transfersize; -#endif - unsigned char *data; - unsigned char phase, tmp, extended_msg[10], old_phase=0xff; - struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; - -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - - while (1) { - tmp = NCR5380_read(STATUS_REG); - /* We only have a valid SCSI phase when REQ is asserted */ - if (tmp & SR_REQ) { - phase = (tmp & PHASE_MASK); - if (phase != old_phase) { - old_phase = phase; - NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); - } - - if(phase == PHASE_CMDOUT) { - void *d; - unsigned long count; - - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { - count = cmd->SCp.buffer->length; - d = SGADDR(cmd->SCp.buffer); - } else { - count = cmd->SCp.this_residual; - d = cmd->SCp.ptr; - } -#ifdef REAL_DMA - /* this command setup for dma yet? */ - if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done - != cmd)) - { - if (cmd->request->cmd_type == REQ_TYPE_FS) { - sun3scsi_dma_setup(d, count, - rq_data_dir(cmd->request)); - sun3_dma_setup_done = cmd; - } - } -#endif -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_INTR; -#endif - } - - - if (sink && (phase != PHASE_MSGOUT)) { - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | - ICR_ASSERT_ACK); - while (NCR5380_read(STATUS_REG) & SR_REQ); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); - sink = 0; - continue; - } - - switch (phase) { - case PHASE_DATAOUT: -#if (NDEBUG & NDEBUG_NO_DATAOUT) - printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " - "aborted\n", HOSTNO); - sink = 1; - do_abort(instance); - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - return; -#endif - case PHASE_DATAIN: - /* - * If there is no room left in the current buffer in the - * scatter-gather list, move onto the next one. - */ - if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { - ++cmd->SCp.buffer; - --cmd->SCp.buffers_residual; - cmd->SCp.this_residual = cmd->SCp.buffer->length; - cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); - dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", - HOSTNO, cmd->SCp.this_residual, - cmd->SCp.buffers_residual); - } - - /* - * The preferred transfer method is going to be - * PSEUDO-DMA for systems that are strictly PIO, - * since we can let the hardware do the handshaking. - * - * For this to work, we need to know the transfersize - * ahead of time, since the pseudo-DMA code will sit - * in an unconditional loop. - */ - -/* ++roman: I suggest, this should be - * #if def(REAL_DMA) - * instead of leaving REAL_DMA out. 
- */ - -#if defined(REAL_DMA) -// if (!cmd->device->borken && - if((transfersize = - NCR5380_dma_xfer_len(instance,cmd,phase)) > SUN3_DMA_MINSIZE) { - len = transfersize; - cmd->SCp.phase = phase; - - if (NCR5380_transfer_dma(instance, &phase, - &len, (unsigned char **) &cmd->SCp.ptr)) { - /* - * If the watchdog timer fires, all future - * accesses to this device will use the - * polled-IO. */ - printk(KERN_NOTICE "scsi%d: switching target %d " - "lun %llu to slow handshake\n", HOSTNO, - cmd->device->id, cmd->device->lun); - cmd->device->borken = 1; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); - sink = 1; - do_abort(instance); - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - /* XXX - need to source or sink data here, as appropriate */ - } else { -#ifdef REAL_DMA - /* ++roman: When using real DMA, - * information_transfer() should return after - * starting DMA since it has nothing more to - * do. - */ - return; -#else - cmd->SCp.this_residual -= transfersize - len; -#endif - } - } else -#endif /* defined(REAL_DMA) */ - NCR5380_transfer_pio(instance, &phase, - (int *) &cmd->SCp.this_residual, (unsigned char **) - &cmd->SCp.ptr); -#ifdef REAL_DMA - /* if we had intended to dma that command clear it */ - if(sun3_dma_setup_done == cmd) - sun3_dma_setup_done = NULL; -#endif - - break; - case PHASE_MSGIN: - len = 1; - data = &tmp; - NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ - NCR5380_transfer_pio(instance, &phase, &len, &data); - cmd->SCp.Message = tmp; - - switch (tmp) { - /* - * Linking lets us reduce the time required to get the - * next command out to the device, hopefully this will - * mean we don't waste another revolution due to the delays - * required by ARBITRATION and another SELECTION. - * - * In the current implementation proposal, low level drivers - * merely have to start the next command, pointed to by - * next_link, done() is called as with unlinked commands. - */ -#ifdef LINKED - case LINKED_CMD_COMPLETE: - case LINKED_FLG_CMD_COMPLETE: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command " - "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* - * Sanity check : A linked command should only terminate - * with one of these messages if there are more linked - * commands available. - */ - - if (!cmd->next_link) { - printk(KERN_NOTICE "scsi%d: target %d lun %llu " - "linked command complete, no next_link\n", - HOSTNO, cmd->device->id, cmd->device->lun); - sink = 1; - do_abort (instance); - return; - } - - initialize_SCp(cmd->next_link); - /* The next command is still part of this process; copy it - * and don't free it! 
*/ - cmd->next_link->tag = cmd->tag; - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request " - "done, calling scsi_done().\n", - HOSTNO, cmd->device->id, cmd->device->lun); -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif - cmd->scsi_done(cmd); - cmd = hostdata->connected; - break; -#endif /* def LINKED */ - case ABORT: - case COMMAND_COMPLETE: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - hostdata->connected = NULL; - dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu " - "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); - if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { - /* Turn a QUEUE FULL status into BUSY, I think the - * mid level cannot handle QUEUE FULL :-( (The - * command is retried after BUSY). Also update our - * queue size to the number of currently issued - * commands now. - */ - /* ++Andreas: the mid level code knows about - QUEUE_FULL now. */ - TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned " - "QUEUE_FULL after %d commands\n", - HOSTNO, cmd->device->id, cmd->device->lun, - ta->nr_allocated); - if (ta->queue_size > ta->nr_allocated) - ta->nr_allocated = ta->queue_size; - } -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - - /* - * I'm not sure what the correct thing to do here is : - * - * If the command that just executed is NOT a request - * sense, the obvious thing to do is to set the result - * code to the values of the stored parameters. - * - * If it was a REQUEST SENSE command, we need some way to - * differentiate between the failure code of the original - * and the failure code of the REQUEST sense - the obvious - * case is success, where we fall through and leave the - * result code unchanged. - * - * The non-obvious place is where the REQUEST SENSE failed - */ - - if (cmd->cmnd[0] != REQUEST_SENSE) - cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - else if (status_byte(cmd->SCp.Status) != GOOD) - cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); - -#ifdef AUTOSENSE - if ((cmd->cmnd[0] == REQUEST_SENSE) && - hostdata->ses.cmd_len) { - scsi_eh_restore_cmnd(cmd, &hostdata->ses); - hostdata->ses.cmd_len = 0 ; - } - - if ((cmd->cmnd[0] != REQUEST_SENSE) && - (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { - scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", - HOSTNO); - /* this is initialized from initialize_SCp - cmd->SCp.buffer = NULL; - cmd->SCp.buffers_residual = 0; - */ - - local_irq_save(flags); - LIST(cmd,hostdata->issue_queue); - SET_NEXT(cmd, hostdata->issue_queue); - hostdata->issue_queue = (struct scsi_cmnd *) cmd; - local_irq_restore(flags); - dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " - "issue queue\n", H_NO(cmd)); - } else -#endif /* def AUTOSENSE */ - { -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif - cmd->scsi_done(cmd); - } - - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* - * Restore phase bits to 0 so an interrupted selection, - * arbitration can resume. 
- */ - NCR5380_write(TARGET_COMMAND_REG, 0); - - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); - - return; - case MESSAGE_REJECT: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - switch (hostdata->last_message) { - case HEAD_OF_QUEUE_TAG: - case ORDERED_QUEUE_TAG: - case SIMPLE_QUEUE_TAG: - /* The target obviously doesn't support tagged - * queuing, even though it announced this ability in - * its INQUIRY data ?!? (maybe only this LUN?) Ok, - * clear 'tagged_supported' and lock the LUN, since - * the command is treated as untagged further on. - */ - cmd->device->tagged_supported = 0; - hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); - cmd->tag = TAG_NONE; - dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected " - "QUEUE_TAG message; tagged queuing " - "disabled\n", - HOSTNO, cmd->device->id, cmd->device->lun); - break; - } - break; - case DISCONNECT: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - local_irq_save(flags); - cmd->device->disconnect = 1; - LIST(cmd,hostdata->disconnected_queue); - SET_NEXT(cmd, hostdata->disconnected_queue); - hostdata->connected = NULL; - hostdata->disconnected_queue = cmd; - local_irq_restore(flags); - dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was " - "moved from connected to the " - "disconnected_queue\n", HOSTNO, - cmd->device->id, cmd->device->lun); - /* - * Restore phase bits to 0 so an interrupted selection, - * arbitration can resume. - */ - NCR5380_write(TARGET_COMMAND_REG, 0); - - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - /* Wait for bus free to avoid nasty timeouts */ - while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) - barrier(); -#ifdef SUN3_SCSI_VME - dregs->csr |= CSR_DMA_ENABLE; -#endif - return; - /* - * The SCSI data pointer is *IMPLICITLY* saved on a disconnect - * operation, in violation of the SCSI spec so we can safely - * ignore SAVE/RESTORE pointers calls. - * - * Unfortunately, some disks violate the SCSI spec and - * don't issue the required SAVE_POINTERS message before - * disconnecting, and we have to break spec to remain - * compatible. - */ - case SAVE_POINTERS: - case RESTORE_POINTERS: - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - /* Enable reselect interrupts */ - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - break; - case EXTENDED_MESSAGE: -/* - * Extended messages are sent in the following format : - * Byte - * 0 EXTENDED_MESSAGE == 1 - * 1 length (includes one byte for code, doesn't - * include first two bytes) - * 2 code - * 3..length+1 arguments - * - * Start the extended message buffer with the EXTENDED_MESSAGE - * byte, since spi_print_msg() wants the whole thing. 
- */ - extended_msg[0] = EXTENDED_MESSAGE; - /* Accept first byte by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); - - len = 2; - data = extended_msg + 1; - phase = PHASE_MSGIN; - NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, - (int)extended_msg[1], (int)extended_msg[2]); - - if (!len && extended_msg[1] <= - (sizeof (extended_msg) - 1)) { - /* Accept third byte by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - len = extended_msg[1] - 1; - data = extended_msg + 3; - phase = PHASE_MSGIN; - - NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", - HOSTNO, len); - - switch (extended_msg[2]) { - case EXTENDED_SDTR: - case EXTENDED_WDTR: - case EXTENDED_MODIFY_DATA_POINTER: - case EXTENDED_EXTENDED_IDENTIFY: - tmp = 0; - } - } else if (len) { - printk(KERN_NOTICE "scsi%d: error receiving " - "extended message\n", HOSTNO); - tmp = 0; - } else { - printk(KERN_NOTICE "scsi%d: extended message " - "code %02x length %d is too long\n", - HOSTNO, extended_msg[2], extended_msg[1]); - tmp = 0; - } - /* Fall through to reject message */ - - /* - * If we get something weird that we aren't expecting, - * reject it. - */ - default: - if (!tmp) { - printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); - spi_print_msg(extended_msg); - printk("\n"); - } else if (tmp != EXTENDED_MESSAGE) - printk(KERN_DEBUG "scsi%d: rejecting unknown " - "message %02x from target %d, lun %llu\n", - HOSTNO, tmp, cmd->device->id, cmd->device->lun); - else - printk(KERN_DEBUG "scsi%d: rejecting unknown " - "extended message " - "code %02x, length %d from target %d, lun %llu\n", - HOSTNO, extended_msg[1], extended_msg[0], - cmd->device->id, cmd->device->lun); - - - msgout = MESSAGE_REJECT; - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | - ICR_ASSERT_ATN); - break; - } /* switch (tmp) */ - break; - case PHASE_MSGOUT: - len = 1; - data = &msgout; - hostdata->last_message = msgout; - NCR5380_transfer_pio(instance, &phase, &len, &data); - if (msgout == ABORT) { -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - hostdata->connected = NULL; - cmd->result = DID_ERROR << 16; -#ifdef NCR5380_STATS - collect_stats(hostdata, cmd); -#endif - cmd->scsi_done(cmd); - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - return; - } - msgout = NOP; - break; - case PHASE_CMDOUT: - len = cmd->cmd_len; - data = cmd->cmnd; - /* - * XXX for performance reasons, on machines with a - * PSEUDO-DMA architecture we should probably - * use the dma transfer function. - */ - NCR5380_transfer_pio(instance, &phase, &len, - &data); - break; - case PHASE_STATIN: - len = 1; - data = &tmp; - NCR5380_transfer_pio(instance, &phase, &len, &data); - cmd->SCp.Status = tmp; - break; - default: - printk("scsi%d: unknown phase\n", HOSTNO); - NCR5380_dprint(NDEBUG_ANY, instance); - } /* switch(phase) */ - } /* if (tmp * SR_REQ) */ - } /* while (1) */ -} - -/* - * Function : void NCR5380_reselect (struct Scsi_Host *instance) - * - * Purpose : does reselection, initializing the instance->connected - * field to point to the struct scsi_cmnd for which the I_T_L or I_T_L_Q - * nexus has been reestablished, - * - * Inputs : instance - this instance of the NCR5380. 
- * - */ - -/* it might eventually prove necessary to do a dma setup on - reselection, but it doesn't seem to be needed now -- sam */ - -static void NCR5380_reselect (struct Scsi_Host *instance) -{ - SETUP_HOSTDATA(instance); - unsigned char target_mask; - unsigned char lun; -#ifdef SUPPORT_TAGS - unsigned char tag; -#endif - unsigned char msg[3]; - struct scsi_cmnd *tmp = NULL, *prev; -/* unsigned long flags; */ - - /* - * Disable arbitration, etc. since the host adapter obviously - * lost, and tell an interrupted NCR5380_select() to restart. - */ - - NCR5380_write(MODE_REG, MR_BASE); - hostdata->restart_select = 1; - - target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - - dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); - - /* - * At this point, we have detected that our SCSI ID is on the bus, - * SEL is true and BSY was false for at least one bus settle delay - * (400 ns). - * - * We must assert BSY ourselves, until the target drops the SEL - * signal. - */ - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); - - while (NCR5380_read(STATUS_REG) & SR_SEL); - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - - /* - * Wait for target to go into MSGIN. - */ - - while (!(NCR5380_read(STATUS_REG) & SR_REQ)); - -#if 1 - // acknowledge toggle to MSGIN - NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); - - // peek at the byte without really hitting the bus - msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); -#endif - - if (!(msg[0] & 0x80)) { - printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); - spi_print_msg(msg); - do_abort(instance); - return; - } - lun = (msg[0] & 0x07); - - /* - * Find the command corresponding to the I_T_L or I_T_L_Q nexus we - * just reestablished, and remove it from the disconnected queue. - */ - - for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; - tmp; prev = tmp, tmp = NEXT(tmp) ) { - if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) -#ifdef SUPPORT_TAGS - && (tag == tmp->tag) -#endif - ) { - if (prev) { - REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); - SET_NEXT(prev, NEXT(tmp)); - } else { - REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); - hostdata->disconnected_queue = NEXT(tmp); - } - SET_NEXT(tmp, NULL); - break; - } - } - - if (!tmp) { - printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " -#ifdef SUPPORT_TAGS - "tag %d " -#endif - "not in disconnected_queue.\n", - HOSTNO, target_mask, lun -#ifdef SUPPORT_TAGS - , tag -#endif - ); - /* - * Since we have an established nexus that we can't do anything - * with, we must abort it. - */ - do_abort(instance); - return; - } -#if 1 - /* engage dma setup for the command we just saw */ - { - void *d; - unsigned long count; - - if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { - count = tmp->SCp.buffer->length; - d = SGADDR(tmp->SCp.buffer); - } else { - count = tmp->SCp.this_residual; - d = tmp->SCp.ptr; - } -#ifdef REAL_DMA - /* setup this command for dma if not already */ - if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp)) - { - sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request)); - sun3_dma_setup_done = tmp; - } -#endif - } -#endif - - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); - /* Accept message by clearing ACK */ - NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - -#ifdef SUPPORT_TAGS - /* If the phase is still MSGIN, the target wants to send some more - * messages. 
In case it supports tagged queuing, this is probably a - * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. - */ - tag = TAG_NONE; - if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { - /* Accept previous IDENTIFY message by clearing ACK */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - len = 2; - data = msg+1; - if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && - msg[1] == SIMPLE_QUEUE_TAG) - tag = msg[2]; - dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " - "reselection\n", HOSTNO, target_mask, lun, tag); - } -#endif - - hostdata->connected = tmp; - dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n", - HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); -} - - -/* - * Function : int NCR5380_abort(struct scsi_cmnd *cmd) - * - * Purpose : abort a command - * - * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the - * host byte of the result field to, if zero DID_ABORTED is - * used. - * - * Returns : 0 - success, -1 on failure. - * - * XXX - there is no way to abort the command that is currently - * connected, you have to wait for it to complete. If this is - * a problem, we could implement longjmp() / setjmp(), setjmp() - * called where the loop started in NCR5380_main(). - */ - -static int NCR5380_abort(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *instance = cmd->device->host; - SETUP_HOSTDATA(instance); - struct scsi_cmnd *tmp, **prev; - unsigned long flags; - - printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); - scsi_print_command(cmd); - - NCR5380_print_status (instance); - - local_irq_save(flags); - - dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, - NCR5380_read(BUS_AND_STATUS_REG), - NCR5380_read(STATUS_REG)); - -#if 1 -/* - * Case 1 : If the command is the currently executing command, - * we'll set the aborted flag and return control so that - * information transfer routine can exit cleanly. - */ - - if (hostdata->connected == cmd) { - - dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO); -/* - * We should perform BSY checking, and make sure we haven't slipped - * into BUS FREE. - */ - -/* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ -/* - * Since we can't change phases until we've completed the current - * handshake, we have to source or sink a byte of data if the current - * phase is not MSGOUT. - */ - -/* - * Return control to the executing NCR drive so we can clear the - * aborted flag and get back into our main loop. - */ - - if (do_abort(instance) == 0) { - hostdata->aborted = 1; - hostdata->connected = NULL; - cmd->result = DID_ABORT << 16; -#ifdef SUPPORT_TAGS - cmd_free_tag( cmd ); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - local_irq_restore(flags); - cmd->scsi_done(cmd); - return SUCCESS; - } else { -/* local_irq_restore(flags); */ - printk("scsi%d: abort of connected command failed!\n", HOSTNO); - return FAILED; - } - } -#endif - -/* - * Case 2 : If the command hasn't been issued yet, we simply remove it - * from the issue queue. 
- */ - for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), - tmp = (struct scsi_cmnd *) hostdata->issue_queue; - tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) - if (cmd == tmp) { - REMOVE(5, *prev, tmp, NEXT(tmp)); - (*prev) = NEXT(tmp); - SET_NEXT(tmp, NULL); - tmp->result = DID_ABORT << 16; - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", - HOSTNO); - /* Tagged queuing note: no tag to free here, hasn't been assigned - * yet... */ - tmp->scsi_done(tmp); - return SUCCESS; - } - -/* - * Case 3 : If any commands are connected, we're going to fail the abort - * and let the high level SCSI driver retry at a later time or - * issue a reset. - * - * Timeouts, and therefore aborted commands, will be highly unlikely - * and handling them cleanly in this situation would make the common - * case of noresets less efficient, and would pollute our code. So, - * we fail. - */ - - if (hostdata->connected) { - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); - return FAILED; - } - -/* - * Case 4: If the command is currently disconnected from the bus, and - * there are no connected commands, we reconnect the I_T_L or - * I_T_L_Q nexus associated with it, go into message out, and send - * an abort message. - * - * This case is especially ugly. In order to reestablish the nexus, we - * need to call NCR5380_select(). The easiest way to implement this - * function was to abort if the bus was busy, and let the interrupt - * handler triggered on the SEL for reselect take care of lost arbitrations - * where necessary, meaning interrupts need to be enabled. - * - * When interrupts are enabled, the queues may change - so we - * can't remove it from the disconnected queue before selecting it - * because that could cause a failure in hashing the nexus if that - * device reselected. - * - * Since the queues may change, we can't use the pointers from when we - * first locate it. - * - * So, we must first locate the command, and if NCR5380_select() - * succeeds, then issue the abort, relocate the command and remove - * it from the disconnected queue. - */ - - for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; - tmp = NEXT(tmp)) - if (cmd == tmp) { - local_irq_restore(flags); - dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); - - if (NCR5380_select (instance, cmd, (int) cmd->tag)) - return FAILED; - - dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); - - do_abort (instance); - - local_irq_save(flags); - for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), - tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; - tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) - if (cmd == tmp) { - REMOVE(5, *prev, tmp, NEXT(tmp)); - *prev = NEXT(tmp); - SET_NEXT(tmp, NULL); - tmp->result = DID_ABORT << 16; - /* We must unlock the tag/LUN immediately here, since the - * target goes to BUS FREE and doesn't send us another - * message (COMMAND_COMPLETE or the like) - */ -#ifdef SUPPORT_TAGS - cmd_free_tag( tmp ); -#else - hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -#endif - local_irq_restore(flags); - tmp->scsi_done(tmp); - return SUCCESS; - } - } - -/* - * Case 5 : If we reached this point, the command was not found in any of - * the queues. 
- * - * We probably reached this point because of an unlikely race condition - * between the command completing successfully and the abortion code, - * so we won't panic, but we will notify the user in case something really - * broke. - */ - - local_irq_restore(flags); - printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); - - return FAILED; -} - - -/* - * Function : int NCR5380_bus_reset(struct scsi_cmnd *cmd) - * - * Purpose : reset the SCSI bus. - * - * Returns : SUCCESS or FAILURE - * - */ - -static int NCR5380_bus_reset(struct scsi_cmnd *cmd) -{ - SETUP_HOSTDATA(cmd->device->host); - int i; - unsigned long flags; -#if defined(RESET_RUN_DONE) - struct scsi_cmnd *connected, *disconnected_queue; -#endif - - - NCR5380_print_status (cmd->device->host); - - /* get in phase */ - NCR5380_write( TARGET_COMMAND_REG, - PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); - /* assert RST */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); - udelay (40); - /* reset NCR registers */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - NCR5380_write( MODE_REG, MR_BASE ); - NCR5380_write( TARGET_COMMAND_REG, 0 ); - NCR5380_write( SELECT_ENABLE_REG, 0 ); - /* ++roman: reset interrupt condition! otherwise no interrupts don't get - * through anymore ... */ - (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); - - /* MSch 20140115 - looking at the generic NCR5380 driver, all of this - * should go. - * Catch-22: if we don't clear all queues, the SCSI driver lock will - * not be released by atari_scsi_reset()! - */ - -#if defined(RESET_RUN_DONE) - /* XXX Should now be done by midlevel code, but it's broken XXX */ - /* XXX see below XXX */ - - /* MSch: old-style reset: actually abort all command processing here */ - - /* After the reset, there are no more connected or disconnected commands - * and no busy units; to avoid problems with re-inserting the commands - * into the issue_queue (via scsi_done()), the aborted commands are - * remembered in local variables first. - */ - local_irq_save(flags); - connected = (struct scsi_cmnd *)hostdata->connected; - hostdata->connected = NULL; - disconnected_queue = (struct scsi_cmnd *)hostdata->disconnected_queue; - hostdata->disconnected_queue = NULL; -#ifdef SUPPORT_TAGS - free_all_tags(); -#endif - for( i = 0; i < 8; ++i ) - hostdata->busy[i] = 0; -#ifdef REAL_DMA - hostdata->dma_len = 0; -#endif - local_irq_restore(flags); - - /* In order to tell the mid-level code which commands were aborted, - * set the command status to DID_RESET and call scsi_done() !!! - * This ultimately aborts processing of these commands in the mid-level. - */ - - if ((cmd = connected)) { - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); - cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); - cmd->scsi_done( cmd ); - } - - for (i = 0; (cmd = disconnected_queue); ++i) { - disconnected_queue = NEXT(cmd); - SET_NEXT(cmd, NULL); - cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); - cmd->scsi_done( cmd ); - } - if (i > 0) - dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); - - - /* since all commands have been explicitly terminated, we need to tell - * the midlevel code that the reset was SUCCESSFUL, and there is no - * need to 'wake up' the commands by a request_sense - */ - return SUCCESS; -#else /* 1 */ - - /* MSch: new-style reset handling: let the mid-level do what it can */ - - /* ++guenther: MID-LEVEL IS STILL BROKEN. 
- * Mid-level is supposed to requeue all commands that were active on the - * various low-level queues. In fact it does this, but that's not enough - * because all these commands are subject to timeout. And if a timeout - * happens for any removed command, *_abort() is called but all queues - * are now empty. Abort then gives up the falcon lock, which is fatal, - * since the mid-level will queue more commands and must have the lock - * (it's all happening inside timer interrupt handler!!). - * Even worse, abort will return NOT_RUNNING for all those commands not - * on any queue, so they won't be retried ... - * - * Conclusion: either scsi.c disables timeout for all resetted commands - * immediately, or we lose! As of linux-2.0.20 it doesn't. - */ - - /* After the reset, there are no more connected or disconnected commands - * and no busy units; so clear the low-level status here to avoid - * conflicts when the mid-level code tries to wake up the affected - * commands! - */ - - if (hostdata->issue_queue) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); - if (hostdata->connected) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); - if (hostdata->disconnected_queue) - dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); - - local_irq_save(flags); - hostdata->issue_queue = NULL; - hostdata->connected = NULL; - hostdata->disconnected_queue = NULL; -#ifdef SUPPORT_TAGS - free_all_tags(); -#endif - for( i = 0; i < 8; ++i ) - hostdata->busy[i] = 0; -#ifdef REAL_DMA - hostdata->dma_len = 0; -#endif - local_irq_restore(flags); - - /* we did no complete reset of all commands, so a wakeup is required */ - return SUCCESS; -#endif /* 1 */ -} - -/* Local Variables: */ -/* tab-width: 8 */ -/* End: */ diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 9707b7494a89..2a906d1d34ba 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -20,89 +20,58 @@ * Generic Generic NCR5380 driver * * Copyright 1995, Russell King - * - * ALPHA RELEASE 1. - * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ - -/* - * This is from mac_scsi.h, but hey, maybe this is useful for Sun3 too! :) - * - * Options : - * - * PARITY - enable parity checking. Not supported. - * - * SCSI2 - enable support for SCSI-II tagged queueing. Untested. - * - * USLEEP - enable support for devices that don't disconnect. Untested. - */ - -#define AUTOSENSE - #include <linux/types.h> -#include <linux/stddef.h> -#include <linux/ctype.h> #include <linux/delay.h> - #include <linux/module.h> -#include <linux/signal.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> +#include <linux/platform_device.h> #include <asm/io.h> - -#include <asm/sun3ints.h> #include <asm/dvma.h> -#include <asm/idprom.h> -#include <asm/machines.h> -/* dma on! */ -#define REAL_DMA - -#include "scsi.h" #include <scsi/scsi_host.h> #include "sun3_scsi.h" -#include "NCR5380.h" -extern int sun3_map_test(unsigned long, char *); +/* Definitions for the core NCR5380 driver. 
*/ -#define USE_WRAPPER -/*#define RESET_BOOT */ -#define DRIVER_SETUP +#define REAL_DMA +/* #define SUPPORT_TAGS */ +/* minimum number of bytes to do dma on */ +#define DMA_MIN_SIZE 129 -/* - * BUG can be used to trigger a strange code-size related hang on 2.1 kernels - */ -#ifdef BUG -#undef RESET_BOOT -#undef DRIVER_SETUP -#endif +/* #define MAX_TAGS 32 */ -/* #define SUPPORT_TAGS */ +#define NCR5380_implementation_fields /* none */ -#ifdef SUN3_SCSI_VME -#define ENABLE_IRQ() -#else -#define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); -#endif +#define NCR5380_read(reg) sun3scsi_read(reg) +#define NCR5380_write(reg, value) sun3scsi_write(reg, value) + +#define NCR5380_queue_command sun3scsi_queue_command +#define NCR5380_bus_reset sun3scsi_bus_reset +#define NCR5380_abort sun3scsi_abort +#define NCR5380_show_info sun3scsi_show_info +#define NCR5380_info sun3scsi_info +#define NCR5380_dma_read_setup(instance, data, count) \ + sun3scsi_dma_setup(data, count, 0) +#define NCR5380_dma_write_setup(instance, data, count) \ + sun3scsi_dma_setup(data, count, 1) +#define NCR5380_dma_residual(instance) \ + sun3scsi_dma_residual(instance) +#define NCR5380_dma_xfer_len(instance, cmd, phase) \ + sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO)) -static irqreturn_t scsi_sun3_intr(int irq, void *dummy); -static inline unsigned char sun3scsi_read(int reg); -static inline void sun3scsi_write(int reg, int value); +#define NCR5380_acquire_dma_irq(instance) (1) +#define NCR5380_release_dma_irq(instance) + +#include "NCR5380.h" + + +extern int sun3_map_test(unsigned long, char *); static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); @@ -117,9 +86,7 @@ module_param(setup_use_tagged_queuing, int, 0); static int setup_hostid = -1; module_param(setup_hostid, int, 0); -static struct scsi_cmnd *sun3_dma_setup_done = NULL; - -#define RESET_RUN_DONE +/* #define RESET_BOOT */ #define AFTER_RESET_DELAY (HZ/2) @@ -129,18 +96,15 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL; /* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ #define SUN3_DVMA_BUFSIZE 0xe000 -/* minimum number of bytes to do dma on */ -#define SUN3_DMA_MINSIZE 128 - -static volatile unsigned char *sun3_scsi_regp; +static struct scsi_cmnd *sun3_dma_setup_done; +static unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; -#ifndef SUN3_SCSI_VME -static struct sun3_udc_regs *udc_regs = NULL; -#endif +static struct sun3_udc_regs *udc_regs; static unsigned char *sun3_dma_orig_addr = NULL; static unsigned long sun3_dma_orig_count = 0; static int sun3_dma_active = 0; static unsigned long last_residual = 0; +static struct Scsi_Host *default_instance; /* * NCR 5380 register access functions @@ -148,12 +112,12 @@ static unsigned long last_residual = 0; static inline unsigned char sun3scsi_read(int reg) { - return( sun3_scsi_regp[reg] ); + return in_8(sun3_scsi_regp + reg); } static inline void sun3scsi_write(int reg, int value) { - sun3_scsi_regp[reg] = value; + out_8(sun3_scsi_regp + reg, value); } #ifndef SUN3_SCSI_VME @@ -180,213 +144,10 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg) } #endif -/* - * XXX: status debug - */ -static struct Scsi_Host *default_instance; - -/* - * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) - * - * Purpose : initializes mac NCR5380 driver based on the - * command line / compile time port and irq definitions. - * - * Inputs : tpnt - template for this SCSI adapter. 
- * - * Returns : 1 if a host adapter was found, 0 if not. - * - */ - -static int __init sun3scsi_detect(struct scsi_host_template *tpnt) -{ - unsigned long ioaddr, irq; - static int called = 0; - struct Scsi_Host *instance; -#ifdef SUN3_SCSI_VME - int i; - unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, - IOBASE_SUN3_VMESCSI + 0x4000, - 0 }; - unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, - SUN3_VEC_VMESCSI1, - 0 }; -#endif - - /* check that this machine has an onboard 5380 */ - switch(idprom->id_machtype) { -#ifdef SUN3_SCSI_VME - case SM_SUN3|SM_3_160: - case SM_SUN3|SM_3_260: - break; -#else - case SM_SUN3|SM_3_50: - case SM_SUN3|SM_3_60: - break; -#endif - - default: - return 0; - } - - if(called) - return 0; - -#ifdef SUN3_SCSI_VME - tpnt->proc_name = "Sun3 5380 VME SCSI"; -#else - tpnt->proc_name = "Sun3 5380 SCSI"; -#endif - - /* setup variables */ - tpnt->can_queue = - (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; - tpnt->cmd_per_lun = - (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; - tpnt->sg_tablesize = - (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; - - if (setup_hostid >= 0) - tpnt->this_id = setup_hostid; - else { - /* use 7 as default */ - tpnt->this_id = 7; - } - -#ifdef SUN3_SCSI_VME - ioaddr = 0; - for (i = 0; addrs[i] != 0; i++) { - unsigned char x; - - ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, - SUN3_PAGE_TYPE_VME16); - irq = vecs[i]; - sun3_scsi_regp = (unsigned char *)ioaddr; - - dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); - - if (sun3_map_test((unsigned long)dregs, &x)) { - unsigned short oldcsr; - - oldcsr = dregs->csr; - dregs->csr = 0; - udelay(SUN3_DMA_DELAY); - if (dregs->csr == 0x1400) - break; - - dregs->csr = oldcsr; - } - - iounmap((void *)ioaddr); - ioaddr = 0; - } - - if (!ioaddr) - return 0; -#else - irq = IRQ_SUN3_SCSI; - ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); - sun3_scsi_regp = (unsigned char *)ioaddr; - - dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); - - if((udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs))) - == NULL) { - printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); - return 0; - } -#endif -#ifdef SUPPORT_TAGS - if (setup_use_tagged_queuing < 0) - setup_use_tagged_queuing = USE_TAGGED_QUEUING; -#endif - - instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); - if(instance == NULL) - return 0; - - default_instance = instance; - - instance->io_port = (unsigned long) ioaddr; - instance->irq = irq; - - NCR5380_init(instance, 0); - - instance->n_io_port = 32; - - ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; - - if (request_irq(instance->irq, scsi_sun3_intr, - 0, "Sun3SCSI-5380", instance)) { -#ifndef REAL_DMA - printk("scsi%d: IRQ%d not free, interrupts disabled\n", - instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; -#else - printk("scsi%d: IRQ%d not free, bailing out\n", - instance->host_no, instance->irq); - return 0; -#endif - } - - pr_info("scsi%d: %s at port %lX irq", instance->host_no, - tpnt->proc_name, instance->io_port); - if (instance->irq == SCSI_IRQ_NONE) - printk ("s disabled"); - else - printk (" %d", instance->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - instance->can_queue, instance->cmd_per_lun, - SUN3SCSI_PUBLIC_RELEASE); - printk("\nscsi%d:", instance->host_no); - NCR5380_print_options(instance); - printk("\n"); - - dregs->csr = 0; - udelay(SUN3_DMA_DELAY); - dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; - udelay(SUN3_DMA_DELAY); - 
dregs->fifo_count = 0; -#ifdef SUN3_SCSI_VME - dregs->fifo_count_hi = 0; - dregs->dma_addr_hi = 0; - dregs->dma_addr_lo = 0; - dregs->dma_count_hi = 0; - dregs->dma_count_lo = 0; - - dregs->ivect = VME_DATA24 | (instance->irq & 0xff); -#endif - - called = 1; - -#ifdef RESET_BOOT - sun3_scsi_reset_boot(instance); -#endif - - return 1; -} - -int sun3scsi_release (struct Scsi_Host *shpnt) -{ - if (shpnt->irq != SCSI_IRQ_NONE) - free_irq(shpnt->irq, shpnt); - - iounmap((void *)sun3_scsi_regp); - - NCR5380_exit(shpnt); - return 0; -} - #ifdef RESET_BOOT -/* - * Our 'bus reset on boot' function - */ - static void sun3_scsi_reset_boot(struct Scsi_Host *instance) { unsigned long end; - - NCR5380_local_declare(); - NCR5380_setup(instance); /* * Do a SCSI reset to clean up the bus during initialization. No @@ -422,11 +183,6 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance) } #endif -static const char *sun3scsi_info(struct Scsi_Host *spnt) -{ - return ""; -} - // safe bits for the CSR #define CSR_GOOD 0x060f @@ -468,7 +224,6 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy) void sun3_sun3_debug (void) { unsigned long flags; - NCR5380_local_declare(); if (default_instance) { local_irq_save(flags); @@ -732,25 +487,200 @@ static int sun3scsi_dma_finish(int write_flag) } -#include "sun3_NCR5380.c" +#include "atari_NCR5380.c" -static struct scsi_host_template driver_template = { +#ifdef SUN3_SCSI_VME +#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" +#define DRV_MODULE_NAME "sun3_scsi_vme" +#else +#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" +#define DRV_MODULE_NAME "sun3_scsi" +#endif + +#define PFX DRV_MODULE_NAME ": " + +static struct scsi_host_template sun3_scsi_template = { + .module = THIS_MODULE, + .proc_name = DRV_MODULE_NAME, .show_info = sun3scsi_show_info, .name = SUN3_SCSI_NAME, - .detect = sun3scsi_detect, - .release = sun3scsi_release, .info = sun3scsi_info, .queuecommand = sun3scsi_queue_command, .eh_abort_handler = sun3scsi_abort, .eh_bus_reset_handler = sun3scsi_bus_reset, - .can_queue = CAN_QUEUE, + .can_queue = 16, .this_id = 7, - .sg_tablesize = SG_TABLESIZE, - .cmd_per_lun = CMD_PER_LUN, + .sg_tablesize = SG_NONE, + .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING }; +static int __init sun3_scsi_probe(struct platform_device *pdev) +{ + struct Scsi_Host *instance; + int error; + struct resource *irq, *mem; + unsigned char *ioaddr; + int host_flags = 0; +#ifdef SUN3_SCSI_VME + int i; +#endif + + if (setup_can_queue > 0) + sun3_scsi_template.can_queue = setup_can_queue; + if (setup_cmd_per_lun > 0) + sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun; + if (setup_sg_tablesize >= 0) + sun3_scsi_template.sg_tablesize = setup_sg_tablesize; + if (setup_hostid >= 0) + sun3_scsi_template.this_id = setup_hostid & 7; + +#ifdef SUN3_SCSI_VME + ioaddr = NULL; + for (i = 0; i < 2; i++) { + unsigned char x; + + irq = platform_get_resource(pdev, IORESOURCE_IRQ, i); + mem = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!irq || !mem) + break; + + ioaddr = sun3_ioremap(mem->start, resource_size(mem), + SUN3_PAGE_TYPE_VME16); + dregs = (struct sun3_dma_regs *)(ioaddr + 8); + + if (sun3_map_test((unsigned long)dregs, &x)) { + unsigned short oldcsr; + + oldcsr = dregs->csr; + dregs->csr = 0; + udelay(SUN3_DMA_DELAY); + if (dregs->csr == 0x1400) + break; + + dregs->csr = oldcsr; + } + + iounmap(ioaddr); + ioaddr = NULL; + } + if (!ioaddr) + return -ENODEV; +#else + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + 
if (!irq || !mem) + return -ENODEV; + + ioaddr = ioremap(mem->start, resource_size(mem)); + dregs = (struct sun3_dma_regs *)(ioaddr + 8); + + udc_regs = dvma_malloc(sizeof(struct sun3_udc_regs)); + if (!udc_regs) { + pr_err(PFX "couldn't allocate DVMA memory!\n"); + iounmap(ioaddr); + return -ENOMEM; + } +#endif + + sun3_scsi_regp = ioaddr; + + instance = scsi_host_alloc(&sun3_scsi_template, + sizeof(struct NCR5380_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + default_instance = instance; + + instance->io_port = (unsigned long)ioaddr; + instance->irq = irq->start; + +#ifdef SUPPORT_TAGS + host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0; +#endif + + NCR5380_init(instance, host_flags); + + error = request_irq(instance->irq, scsi_sun3_intr, 0, + "NCR5380", instance); + if (error) { +#ifdef REAL_DMA + pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n", + instance->host_no, instance->irq); + goto fail_irq; +#else + pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n", + instance->host_no, instance->irq); + instance->irq = NO_IRQ; +#endif + } + + dregs->csr = 0; + udelay(SUN3_DMA_DELAY); + dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; + udelay(SUN3_DMA_DELAY); + dregs->fifo_count = 0; +#ifdef SUN3_SCSI_VME + dregs->fifo_count_hi = 0; + dregs->dma_addr_hi = 0; + dregs->dma_addr_lo = 0; + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + + dregs->ivect = VME_DATA24 | (instance->irq & 0xff); +#endif + +#ifdef RESET_BOOT + sun3_scsi_reset_boot(instance); +#endif + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); +fail_irq: + NCR5380_exit(instance); + scsi_host_put(instance); +fail_alloc: + if (udc_regs) + dvma_free(udc_regs); + iounmap(sun3_scsi_regp); + return error; +} + +static int __exit sun3_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + + scsi_remove_host(instance); + if (instance->irq != NO_IRQ) + free_irq(instance->irq, instance); + NCR5380_exit(instance); + scsi_host_put(instance); + if (udc_regs) + dvma_free(udc_regs); + iounmap(sun3_scsi_regp); + return 0; +} + +static struct platform_driver sun3_scsi_driver = { + .remove = __exit_p(sun3_scsi_remove), + .driver = { + .name = DRV_MODULE_NAME, + .owner = THIS_MODULE, + }, +}; -#include "scsi_module.c" +module_platform_driver_probe(sun3_scsi_driver, sun3_scsi_probe); +MODULE_ALIAS("platform:" DRV_MODULE_NAME); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index e96a37cf06ac..d22745fae328 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h @@ -13,95 +13,11 @@ * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 440-4894 - * - * ALPHA RELEASE 1. 
- * - * For more information, please consult - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ #ifndef SUN3_SCSI_H #define SUN3_SCSI_H -#define SUN3SCSI_PUBLIC_RELEASE 1 - -/* - * Int: level 2 autovector - * IO: type 1, base 0x00140000, 5 bits phys space: A<4..0> - */ -#define IRQ_SUN3_SCSI 2 -#define IOBASE_SUN3_SCSI 0x00140000 - -#define IOBASE_SUN3_VMESCSI 0xff200000 - -static int sun3scsi_abort(struct scsi_cmnd *); -static int sun3scsi_detect (struct scsi_host_template *); -static const char *sun3scsi_info (struct Scsi_Host *); -static int sun3scsi_bus_reset(struct scsi_cmnd *); -static int sun3scsi_queue_command(struct Scsi_Host *, struct scsi_cmnd *); -static int sun3scsi_release (struct Scsi_Host *); - -#ifndef CMD_PER_LUN -#define CMD_PER_LUN 2 -#endif - -#ifndef CAN_QUEUE -#define CAN_QUEUE 16 -#endif - -#ifndef SG_TABLESIZE -#define SG_TABLESIZE SG_NONE -#endif - -#ifndef MAX_TAGS -#define MAX_TAGS 32 -#endif - -#ifndef USE_TAGGED_QUEUING -#define USE_TAGGED_QUEUING 1 -#endif - -#include <scsi/scsicam.h> - -#ifdef SUN3_SCSI_VME -#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI" -#else -#define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" -#endif - -#define NCR5380_implementation_fields \ - int port, ctrl - -#define NCR5380_local_declare() \ - struct Scsi_Host *_instance - -#define NCR5380_setup(instance) \ - _instance = instance - -#define NCR5380_read(reg) sun3scsi_read(reg) -#define NCR5380_write(reg, value) sun3scsi_write(reg, value) - -#define NCR5380_intr sun3scsi_intr -#define NCR5380_queue_command sun3scsi_queue_command -#define NCR5380_bus_reset sun3scsi_bus_reset -#define NCR5380_abort sun3scsi_abort -#define NCR5380_show_info sun3scsi_show_info -#define NCR5380_dma_xfer_len(i, cmd, phase) \ - sun3scsi_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) - -#define NCR5380_dma_write_setup(instance, data, count) sun3scsi_dma_setup(data, count, 1) -#define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0) -#define NCR5380_dma_residual sun3scsi_dma_residual - /* additional registers - mainly DMA control regs */ /* these start at regbase + 8 -- directly after the NCR regs */ struct sun3_dma_regs { diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index e59e6f96b725..5d00e514ff28 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -820,9 +820,7 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev) if (reqtags > SYM_CONF_MAX_TAG) reqtags = SYM_CONF_MAX_TAG; depth_to_use = reqtags ? reqtags : 1; - scsi_adjust_queue_depth(sdev, - sdev->tagged_supported ? MSG_SIMPLE_TAG : 0, - depth_to_use); + scsi_change_queue_depth(sdev, depth_to_use); lp->s.scdev_depth = depth_to_use; sym_tune_dev_queuing(tp, sdev->lun, reqtags); diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index 8cc80931df14..87828acbf7c6 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -1,4 +1,3 @@ -#define AUTOSENSE #define PSEUDO_DMA /* @@ -12,8 +11,6 @@ * drew@colorado.edu * +1 (303) 440-4894 * - * DISTRIBUTION RELEASE 3. - * * For more information, please consult * * Trantor Systems, Ltd. 
@@ -24,40 +21,9 @@ * 5415 Randall Place * Fremont, CA 94538 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 - * - * and - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ /* - * Options : - * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically - * for commands that return with a CHECK CONDITION status. - * - * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance - * increase compared to polled I/O. - * - * PARITY - enable parity checking. Not supported. - * - * SCSI2 - enable support for SCSI-II tagged queueing. Untested. - * - * - * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You - * only really want to use this if you're having a problem with - * dropped characters during high speed communications, and even - * then, you're going to be better off twiddling with transfersize. - * - * USLEEP - enable support for devices that don't disconnect. Untested. - * * The card is detected and initialized in one of several ways : * 1. Autoprobe (default) - since the board is memory mapped, * a BIOS signature is scanned for to locate the registers. @@ -111,7 +77,6 @@ #include <linux/module.h> #include <linux/delay.h> -#include "scsi.h" #include <scsi/scsi_host.h> #include "t128.h" #define AUTOPROBE_IRQ @@ -148,6 +113,7 @@ static struct signature { #define NO_SIGNATURES ARRAY_SIZE(signatures) +#ifndef MODULE /* * Function : t128_setup(char *str, int *ints) * @@ -158,9 +124,13 @@ static struct signature { * */ -void __init t128_setup(char *str, int *ints){ +static int __init t128_setup(char *str) +{ static int commandline_current = 0; int i; + int ints[10]; + + get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] != 2) printk("t128_setup : usage t128=address,irq\n"); else @@ -174,8 +144,12 @@ void __init t128_setup(char *str, int *ints){ } ++commandline_current; } + return 1; } +__setup("t128=", t128_setup); +#endif + /* * Function : int t128_detect(struct scsi_host_template * tpnt) * @@ -189,17 +163,14 @@ void __init t128_setup(char *str, int *ints){ * */ -int __init t128_detect(struct scsi_host_template * tpnt){ +static int __init t128_detect(struct scsi_host_template *tpnt) +{ static int current_override = 0, current_base = 0; struct Scsi_Host *instance; unsigned long base; void __iomem *p; int sig, count; - tpnt->proc_name = "t128"; - tpnt->show_info = t128_show_info; - tpnt->write_info = t128_write_info; - for (count = 0; current_override < NO_OVERRIDES; ++current_override) { base = 0; p = NULL; @@ -254,15 +225,19 @@ found: else instance->irq = NCR5380_probe_irq(instance, T128_IRQS); - if (instance->irq != SCSI_IRQ_NONE) + /* Compatibility with documented NCR5380 kernel parameters */ + if (instance->irq == 255) + instance->irq = NO_IRQ; + + if (instance->irq != NO_IRQ) if (request_irq(instance->irq, t128_intr, 0, "t128", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; + instance->irq = NO_IRQ; } - if (instance->irq == SCSI_IRQ_NONE) { + if (instance->irq == NO_IRQ) { printk("scsi%d : interrupts not enabled. 
for better interactive performance,\n", instance->host_no); printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); } @@ -271,16 +246,6 @@ found: printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); #endif - printk("scsi%d : at 0x%08lx", instance->host_no, instance->base); - if (instance->irq == SCSI_IRQ_NONE) - printk (" interrupts disabled"); - else - printk (" irq %d", instance->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - CAN_QUEUE, CMD_PER_LUN, T128_PUBLIC_RELEASE); - NCR5380_print_options(instance); - printk("\n"); - ++current_override; ++count; } @@ -291,7 +256,7 @@ static int t128_release(struct Scsi_Host *shost) { NCR5380_local_declare(); NCR5380_setup(shost); - if (shost->irq) + if (shost->irq != NO_IRQ) free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->io_port && shost->n_io_port) @@ -321,8 +286,8 @@ static int t128_release(struct Scsi_Host *shost) * and matching the H_C_S coordinates to what DOS uses. */ -int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, - sector_t capacity, int * ip) +static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int *ip) { ip[0] = 64; ip[1] = 32; @@ -430,6 +395,10 @@ static struct scsi_host_template driver_template = { .name = "Trantor T128/T128F/T228", .detect = t128_detect, .release = t128_release, + .proc_name = "t128", + .show_info = t128_show_info, + .write_info = t128_write_info, + .info = t128_info, .queuecommand = t128_queue_command, .eh_abort_handler = t128_abort, .eh_bus_reset_handler = t128_bus_reset, diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index fd68cecc62af..2c7371454dfd 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h @@ -8,8 +8,6 @@ * drew@colorado.edu * +1 (303) 440-4894 * - * DISTRIBUTION RELEASE 3. - * * For more information, please consult * * Trantor Systems, Ltd. @@ -20,25 +18,11 @@ * 5415 Randall Place * Fremont, CA 94538 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910 - * - * and - * - * NCR 5380 Family - * SCSI Protocol Controller - * Databook - * - * NCR Microelectronics - * 1635 Aeroplaza Drive - * Colorado Springs, CO 80916 - * 1+ (719) 578-3400 - * 1+ (800) 334-5454 */ #ifndef T128_H #define T128_H -#define T128_PUBLIC_RELEASE 3 - #define TDEBUG 0 #define TDEBUG_INIT 0x1 #define TDEBUG_TRANSFER 0x2 @@ -88,12 +72,6 @@ #define T_DATA_REG_OFFSET 0x1e00 /* rw 512 bytes long */ #ifndef ASM -static int t128_abort(struct scsi_cmnd *); -static int t128_biosparam(struct scsi_device *, struct block_device *, - sector_t, int*); -static int t128_detect(struct scsi_host_template *); -static int t128_queue_command(struct Scsi_Host *, struct scsi_cmnd *); -static int t128_bus_reset(struct scsi_cmnd *); #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 @@ -134,6 +112,7 @@ static int t128_bus_reset(struct scsi_cmnd *); #define NCR5380_queue_command t128_queue_command #define NCR5380_abort t128_abort #define NCR5380_bus_reset t128_bus_reset +#define NCR5380_info t128_info #define NCR5380_show_info t128_show_info #define NCR5380_write_info t128_write_info diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c deleted file mode 100644 index 764575726c85..000000000000 --- a/drivers/scsi/tmscsim.c +++ /dev/null @@ -1,2620 +0,0 @@ -/************************************************************************ - * FILE NAME : TMSCSIM.C * - * BY : C.L. 
Huang, ching@tekram.com.tw * - * Description: Device Driver for Tekram DC-390(T) PCI SCSI * - * Bus Master Host Adapter * - * (C)Copyright 1995-1996 Tekram Technology Co., Ltd. * - ************************************************************************ - * (C) Copyright: put under GNU GPL in 10/96 * - * (see Documentation/scsi/tmscsim.txt) * - ************************************************************************ - * $Id: tmscsim.c,v 2.60.2.30 2000/12/20 01:07:12 garloff Exp $ * - * Enhancements and bugfixes by * - * Kurt Garloff <kurt@garloff.de> <garloff@suse.de> * - ************************************************************************ - * HISTORY: * - * * - * REV# DATE NAME DESCRIPTION * - * 1.00 96/04/24 CLH First release * - * 1.01 96/06/12 CLH Fixed bug of Media Change for Removable * - * Device, scan all LUN. Support Pre2.0.10 * - * 1.02 96/06/18 CLH Fixed bug of Command timeout ... * - * 1.03 96/09/25 KG Added tmscsim_proc_info() * - * 1.04 96/10/11 CLH Updating for support KV 2.0.x * - * 1.05 96/10/18 KG Fixed bug in DC390_abort(null ptr deref)* - * 1.06 96/10/25 KG Fixed module support * - * 1.07 96/11/09 KG Fixed tmscsim_proc_info() * - * 1.08 96/11/18 KG Fixed null ptr in DC390_Disconnect() * - * 1.09 96/11/30 KG Added register the allocated IO space * - * 1.10 96/12/05 CLH Modified tmscsim_proc_info(), and reset * - * pending interrupt in DC390_detect() * - * 1.11 97/02/05 KG/CLH Fixeds problem with partitions greater * - * than 1GB * - * 1.12 98/02/15 MJ Rewritten PCI probing * - * 1.13 98/04/08 KG Support for non DC390, __initfunc decls,* - * changed max devs from 10 to 16 * - * 1.14a 98/05/05 KG Dynamic DCB allocation, add-single-dev * - * for LUNs if LUN_SCAN (BIOS) not set * - * runtime config using /proc interface * - * 1.14b 98/05/06 KG eliminated cli (); sti (); spinlocks * - * 1.14c 98/05/07 KG 2.0.x compatibility * - * 1.20a 98/05/07 KG changed names of funcs to be consistent * - * DC390_ (entry points), dc390_ (internal)* - * reworked locking * - * 1.20b 98/05/12 KG bugs: version, kfree, _ctmp * - * debug output * - * 1.20c 98/05/12 KG bugs: kfree, parsing, EEpromDefaults * - * 1.20d 98/05/14 KG bugs: list linkage, clear flag after * - * reset on startup, code cleanup * - * 1.20e 98/05/15 KG spinlock comments, name space cleanup * - * pLastDCB now part of ACB structure * - * added stats, timeout for 2.1, TagQ bug * - * RESET and INQUIRY interface commands * - * 1.20f 98/05/18 KG spinlocks fixes, max_lun fix, free DCBs * - * for missing LUNs, pending int * - * 1.20g 98/05/19 KG Clean up: Avoid short * - * 1.20h 98/05/21 KG Remove AdaptSCSIID, max_lun ... * - * 1.20i 98/05/21 KG Aiiie: Bug with TagQMask * - * 1.20j 98/05/24 KG Handle STAT_BUSY, handle pACB->pLinkDCB * - * == 0 in remove_dev and DoingSRB_Done * - * 1.20k 98/05/25 KG DMA_INT (experimental) * - * 1.20l 98/05/27 KG remove DMA_INT; DMA_IDLE cmds added; * - * 1.20m 98/06/10 KG glitch configurable; made some global * - * vars part of ACB; use DC390_readX * - * 1.20n 98/06/11 KG startup params * - * 1.20o 98/06/15 KG added TagMaxNum to boot/module params * - * Device Nr -> Idx, TagMaxNum power of 2 * - * 1.20p 98/06/17 KG Docu updates. Reset depends on settings * - * pci_set_master added; 2.0.xx: pcibios_* * - * used instead of MechNum things ... * - * 1.20q 98/06/23 KG Changed defaults. Added debug code for * - * removable media and fixed it. TagMaxNum * - * fixed for DC390. Locking: ACB, DRV for * - * better IRQ sharing. Spelling: Queueing * - * Parsing and glitch_cfg changes. 
Display * - * real SyncSpeed value. Made DisConn * - * functional (!) * - * 1.20r 98/06/30 KG Debug macros, allow disabling DsCn, set * - * BIT4 in CtrlR4, EN_PAGE_INT, 2.0 module * - * param -1 fixed. * - * 1.20s 98/08/20 KG Debug info on abort(), try to check PCI,* - * phys_to_bus instead of phys_to_virt, * - * fixed sel. process, fixed locking, * - * added MODULE_XXX infos, changed IRQ * - * request flags, disable DMA_INT * - * 1.20t 98/09/07 KG TagQ report fixed; Write Erase DMA Stat;* - * initfunc -> __init; better abort; * - * Timeout for XFER_DONE & BLAST_COMPLETE; * - * Allow up to 33 commands being processed * - * 2.0a 98/10/14 KG Max Cmnds back to 17. DMA_Stat clearing * - * all flags. Clear within while() loops * - * in DataIn_0/Out_0. Null ptr in dumpinfo * - * for pSRB==0. Better locking during init.* - * bios_param() now respects part. table. * - * 2.0b 98/10/24 KG Docu fixes. Timeout Msg in DMA Blast. * - * Disallow illegal idx in INQUIRY/REMOVE * - * 2.0c 98/11/19 KG Cleaned up detect/init for SMP boxes, * - * Write Erase DMA (1.20t) caused problems * - * 2.0d 98/12/25 KG Christmas release ;-) Message handling * - * completely reworked. Handle target ini- * - * tiated SDTR correctly. * - * 2.0d1 99/01/25 KG Try to handle RESTORE_PTR * - * 2.0d2 99/02/08 KG Check for failure of kmalloc, correct * - * inclusion of scsicam.h, DelayReset * - * 2.0d3 99/05/31 KG DRIVER_OK -> DID_OK, DID_NO_CONNECT, * - * detect Target mode and warn. * - * pcmd->result handling cleaned up. * - * 2.0d4 99/06/01 KG Cleaned selection process. Found bug * - * which prevented more than 16 tags. Now: * - * 24. SDTR cleanup. Cleaner multi-LUN * - * handling. Don't modify ControlRegs/FIFO * - * when connected. * - * 2.0d5 99/06/01 KG Clear DevID, Fix INQUIRY after cfg chg. * - * 2.0d6 99/06/02 KG Added ADD special command to allow cfg. * - * before detection. Reset SYNC_NEGO_DONE * - * after a bus reset. * - * 2.0d7 99/06/03 KG Fixed bugs wrt add,remove commands * - * 2.0d8 99/06/04 KG Removed copying of cmnd into CmdBlock. * - * Fixed Oops in _release(). * - * 2.0d9 99/06/06 KG Also tag queue INQUIRY, T_U_R, ... * - * Allow arb. no. of Tagged Cmnds. Max 32 * - * 2.0d1099/06/20 KG TagMaxNo changes now honoured! Queueing * - * clearified (renamed ..) TagMask handling* - * cleaned. * - * 2.0d1199/06/28 KG cmd->result now identical to 2.0d2 * - * 2.0d1299/07/04 KG Changed order of processing in IRQ * - * 2.0d1399/07/05 KG Don't update DCB fields if removed * - * 2.0d1499/07/05 KG remove_dev: Move kfree() to the end * - * 2.0d1599/07/12 KG use_new_eh_code: 0, ULONG -> UINT where * - * appropriate * - * 2.0d1699/07/13 KG Reenable StartSCSI interrupt, Retry msg * - * 2.0d1799/07/15 KG Remove debug msg. Disable recfg. when * - * there are queued cmnds * - * 2.0d1899/07/18 KG Selection timeout: Don't requeue * - * 2.0d1999/07/18 KG Abort: Only call scsi_done if dequeued * - * 2.0d2099/07/19 KG Rst_Detect: DoingSRB_Done * - * 2.0d2199/08/15 KG dev_id for request/free_irq, cmnd[0] for* - * RETRY, SRBdone does DID_ABORT for the * - * cmd passed by DC390_reset() * - * 2.0d2299/08/25 KG dev_id fixed. can_queue: 42 * - * 2.0d2399/08/25 KG Removed some debugging code. dev_id * - * now is set to pACB. Use u8,u16,u32. * - * 2.0d2499/11/14 KG Unreg. I/O if failed IRQ alloc. Call * - * done () w/ DID_BAD_TARGET in case of * - * missing DCB. We are old EH!! * - * 2.0d2500/01/15 KG 2.3.3x compat from Andreas Schultz * - * set unique_id. Disable RETRY message. * - * 2.0d2600/01/29 KG Go to new EH. * - * 2.0d2700/01/31 KG ... 
but maintain 2.0 compat. * - * and fix DCB freeing * - * 2.0d2800/02/14 KG Queue statistics fixed, dump special cmd* - * Waiting_Timer for failed StartSCSI * - * New EH: Don't return cmnds to ML on RST * - * Use old EH (don't have new EH fns yet) * - * Reset: Unlock, but refuse to queue * - * 2.3 __setup function * - * 2.0e 00/05/22 KG Return residual for 2.3 * - * 2.0e1 00/05/25 KG Compile fixes for 2.3.99 * - * 2.0e2 00/05/27 KG Jeff Garzik's pci_enable_device() * - * 2.0e3 00/09/29 KG Some 2.4 changes. Don't try Sync Nego * - * before INQUIRY has reported ability. * - * Recognise INQUIRY as scanning command. * - * 2.0e4 00/10/13 KG Allow compilation into 2.4 kernel * - * 2.0e5 00/11/17 KG Store Inq.flags in DCB * - * 2.0e6 00/11/22 KG 2.4 init function (Thx to O.Schumann) * - * 2.4 PCI device table (Thx to A.Richter) * - * 2.0e7 00/11/28 KG Allow overriding of BIOS settings * - * 2.0f 00/12/20 KG Handle failed INQUIRYs during scan * - * 2.1a 03/11/29 GL, KG Initial fixing for 2.6. Convert to * - * use the current PCI-mapping API, update * - * command-queuing. * - * 2.1b 04/04/13 GL Fix for 64-bit platforms * - * 2.1b1 04/01/31 GL (applied 05.04) Remove internal * - * command-queuing. * - * 2.1b2 04/02/01 CH (applied 05.04) Fix error-handling * - * 2.1c 04/05/23 GL Update to use the new pci_driver API, * - * some scsi EH updates, more cleanup. * - * 2.1d 04/05/27 GL Moved setting of scan_devices to * - * slave_alloc/_configure/_destroy, as * - * suggested by CH. * - ***********************************************************************/ - -/* DEBUG options */ -//#define DC390_DEBUG0 -//#define DC390_DEBUG1 -//#define DC390_DCBDEBUG -//#define DC390_PARSEDEBUG -//#define DC390_REMOVABLEDEBUG -//#define DC390_LOCKDEBUG - -//#define NOP do{}while(0) -#define C_NOP - -/* Debug definitions */ -#ifdef DC390_DEBUG0 -# define DEBUG0(x) x -#else -# define DEBUG0(x) C_NOP -#endif -#ifdef DC390_DEBUG1 -# define DEBUG1(x) x -#else -# define DEBUG1(x) C_NOP -#endif -#ifdef DC390_DCBDEBUG -# define DCBDEBUG(x) x -#else -# define DCBDEBUG(x) C_NOP -#endif -#ifdef DC390_PARSEDEBUG -# define PARSEDEBUG(x) x -#else -# define PARSEDEBUG(x) C_NOP -#endif -#ifdef DC390_REMOVABLEDEBUG -# define REMOVABLEDEBUG(x) x -#else -# define REMOVABLEDEBUG(x) C_NOP -#endif -#define DCBDEBUG1(x) C_NOP - -#include <linux/module.h> -#include <linux/delay.h> -#include <linux/signal.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/ioport.h> -#include <linux/pci.h> -#include <linux/proc_fs.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/blkdev.h> -#include <linux/timer.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <asm/io.h> - -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_host.h> -#include <scsi/scsicam.h> -#include <scsi/scsi_tcq.h> - - -#define DC390_BANNER "Tekram DC390/AM53C974" -#define DC390_VERSION "2.1d 2004-05-27" - -#define PCI_DEVICE_ID_AMD53C974 PCI_DEVICE_ID_AMD_SCSI - -#include "tmscsim.h" - - -static void dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, 
u8 *psstatus); -static void dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); -static void dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus); - -static void dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB ); -static void dc390_Disconnect( struct dc390_acb* pACB ); -static void dc390_Reselect( struct dc390_acb* pACB ); -static void dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB ); -static void dc390_ScsiRstDetect( struct dc390_acb* pACB ); -static void dc390_EnableMsgOut_Abort(struct dc390_acb*, struct dc390_srb*); -static void dc390_dumpinfo(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB); -static void dc390_ResetDevParam(struct dc390_acb* pACB); - -static u32 dc390_laststatus = 0; -static u8 dc390_adapterCnt = 0; - -static int disable_clustering; -module_param(disable_clustering, int, S_IRUGO); -MODULE_PARM_DESC(disable_clustering, "If you experience problems with your devices, try setting to 1"); - -/* Startup values, to be overriden on the commandline */ -static int tmscsim[] = {-2, -2, -2, -2, -2, -2}; - -module_param_array(tmscsim, int, NULL, 0); -MODULE_PARM_DESC(tmscsim, "Host SCSI ID, Speed (0=10MHz), Device Flags, Adapter Flags, Max Tags (log2(tags)-1), DelayReset (s)"); -MODULE_AUTHOR("C.L. 
Huang / Kurt Garloff"); -MODULE_DESCRIPTION("SCSI host adapter driver for Tekram DC390 and other AMD53C974A based PCI SCSI adapters"); -MODULE_LICENSE("GPL"); -MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); - -static void *dc390_phase0[]={ - dc390_DataOut_0, - dc390_DataIn_0, - dc390_Command_0, - dc390_Status_0, - dc390_Nop_0, - dc390_Nop_0, - dc390_MsgOut_0, - dc390_MsgIn_0, - dc390_Nop_1 - }; - -static void *dc390_phase1[]={ - dc390_DataOutPhase, - dc390_DataInPhase, - dc390_CommandPhase, - dc390_StatusPhase, - dc390_Nop_0, - dc390_Nop_0, - dc390_MsgOutPhase, - dc390_MsgInPhase, - dc390_Nop_1 - }; - -#ifdef DC390_DEBUG1 -static char* dc390_p0_str[] = { - "dc390_DataOut_0", - "dc390_DataIn_0", - "dc390_Command_0", - "dc390_Status_0", - "dc390_Nop_0", - "dc390_Nop_0", - "dc390_MsgOut_0", - "dc390_MsgIn_0", - "dc390_Nop_1" - }; - -static char* dc390_p1_str[] = { - "dc390_DataOutPhase", - "dc390_DataInPhase", - "dc390_CommandPhase", - "dc390_StatusPhase", - "dc390_Nop_0", - "dc390_Nop_0", - "dc390_MsgOutPhase", - "dc390_MsgInPhase", - "dc390_Nop_1" - }; -#endif - -static u8 dc390_eepromBuf[MAX_ADAPTER_NUM][EE_LEN]; -static u8 dc390_clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20}; -static u8 dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20}; - -/*********************************************************************** - * Functions for the management of the internal structures - * (DCBs, SRBs, Queueing) - * - **********************************************************************/ -static void inline dc390_start_segment(struct dc390_srb* pSRB) -{ - struct scatterlist *psgl = pSRB->pSegmentList; - - /* start new sg segment */ - pSRB->SGBusAddr = sg_dma_address(psgl); - pSRB->SGToBeXferLen = sg_dma_len(psgl); -} - -static unsigned long inline dc390_advance_segment(struct dc390_srb* pSRB, u32 residue) -{ - unsigned long xfer = pSRB->SGToBeXferLen - residue; - - /* xfer more bytes transferred */ - pSRB->SGBusAddr += xfer; - pSRB->TotalXferredLen += xfer; - pSRB->SGToBeXferLen = residue; - - return xfer; -} - -static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun) -{ - struct dc390_dcb* pDCB = pACB->pLinkDCB; if (!pDCB) return NULL; - while (pDCB->TargetID != id || pDCB->TargetLUN != lun) - { - pDCB = pDCB->pNextDCB; - if (pDCB == pACB->pLinkDCB) - return NULL; - } - DCBDEBUG1( printk (KERN_DEBUG "DCB %p (%02x,%02x) found.\n", \ - pDCB, pDCB->TargetID, pDCB->TargetLUN)); - return pDCB; -} - -/* Insert SRB oin top of free list */ -static __inline__ void dc390_Free_insert (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - DEBUG0(printk ("DC390: Free SRB %p\n", pSRB)); - pSRB->pNextSRB = pACB->pFreeSRB; - pACB->pFreeSRB = pSRB; -} - -static __inline__ void dc390_Going_append (struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - pDCB->GoingSRBCnt++; - DEBUG0(printk("DC390: Append SRB %p to Going\n", pSRB)); - /* Append to the list of Going commands */ - if( pDCB->pGoingSRB ) - pDCB->pGoingLast->pNextSRB = pSRB; - else - pDCB->pGoingSRB = pSRB; - - pDCB->pGoingLast = pSRB; - /* No next one in sent list */ - pSRB->pNextSRB = NULL; -} - -static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - DEBUG0(printk("DC390: Remove SRB %p from Going\n", pSRB)); - if (pSRB == pDCB->pGoingSRB) - pDCB->pGoingSRB = pSRB->pNextSRB; - else - { - struct dc390_srb* psrb = pDCB->pGoingSRB; - while (psrb && psrb->pNextSRB != pSRB) - psrb = psrb->pNextSRB; - if (!psrb) - { printk (KERN_ERR "DC390: Remove non-ex. 
SRB %p from Going!\n", pSRB); return; } - psrb->pNextSRB = pSRB->pNextSRB; - if (pSRB == pDCB->pGoingLast) - pDCB->pGoingLast = psrb; - } - pDCB->GoingSRBCnt--; -} - -static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length) -{ - sg_init_one(sg, addr, length); - return sg; -} - -/* Create pci mapping */ -static int dc390_pci_map (struct dc390_srb* pSRB) -{ - int error = 0; - struct scsi_cmnd *pcmd = pSRB->pcmd; - struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev; - dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp)); - - /* Map sense buffer */ - if (pSRB->SRBFlag & AUTO_REQSENSE) { - pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); - pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1, - DMA_FROM_DEVICE); - cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList); - - /* TODO: error handling */ - if (pSRB->SGcount != 1) - error = 1; - DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle)); - /* Map SG list */ - } else if (scsi_sg_count(pcmd)) { - int nseg; - - nseg = scsi_dma_map(pcmd); - - pSRB->pSegmentList = scsi_sglist(pcmd); - pSRB->SGcount = nseg; - - /* TODO: error handling */ - if (nseg < 0) - error = 1; - DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\ - __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd))); - /* Map single segment */ - } else - pSRB->SGcount = 0; - - return error; -} - -/* Remove pci mapping */ -static void dc390_pci_unmap (struct dc390_srb* pSRB) -{ - struct scsi_cmnd *pcmd = pSRB->pcmd; - struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev; - DEBUG1(dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp))); - - if (pSRB->SRBFlag) { - pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE); - DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle)); - } else { - scsi_dma_unmap(pcmd); - DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n", - __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd))); - } -} - -static void __inline__ -dc390_freetag (struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - if (pSRB->TagNumber != SCSI_NO_TAG) { - pDCB->TagMask &= ~(1 << pSRB->TagNumber); /* free tag mask */ - pSRB->TagNumber = SCSI_NO_TAG; - } -} - - -static int -dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB ) -{ - struct scsi_cmnd *scmd = pSRB->pcmd; - struct scsi_device *sdev = scmd->device; - u8 cmd, disc_allowed, try_sync_nego; - char tag[2]; - - pSRB->ScsiPhase = SCSI_NOP0; - - if (pACB->Connected) - { - // Should not happen normally - printk (KERN_WARNING "DC390: Can't select when connected! 
(%08x,%02x)\n", - pSRB->SRBState, pSRB->SRBFlag); - pSRB->SRBState = SRB_READY; - pACB->SelConn++; - return 1; - } - if (time_before (jiffies, pACB->last_reset)) - { - DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n")); - return 1; - } - /* KG: Moved pci mapping here */ - dc390_pci_map(pSRB); - /* TODO: error handling */ - DC390_write8 (Scsi_Dest_ID, pDCB->TargetID); - DC390_write8 (Sync_Period, pDCB->SyncPeriod); - DC390_write8 (Sync_Offset, pDCB->SyncOffset); - DC390_write8 (CtrlReg1, pDCB->CtrlR1); - DC390_write8 (CtrlReg3, pDCB->CtrlR3); - DC390_write8 (CtrlReg4, pDCB->CtrlR4); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); /* Flush FIFO */ - DEBUG1(printk (KERN_INFO "DC390: Start SCSI command: %02x (Sync:%02x)\n",\ - scmd->cmnd[0], pDCB->SyncMode)); - - /* Don't disconnect on AUTO_REQSENSE, cause it might be an - * Contingent Allegiance Condition (6.6), where no tags should be used. - * All other have to be allowed to disconnect to prevent Incorrect - * Initiator Connection (6.8.2/6.5.2) */ - /* Changed KG, 99/06/06 */ - if (! (pSRB->SRBFlag & AUTO_REQSENSE)) - disc_allowed = pDCB->DevMode & EN_DISCONNECT_; - else - disc_allowed = 0; - - if ((pDCB->SyncMode & SYNC_ENABLE) && pDCB->TargetLUN == 0 && sdev->sdtr && - (((scmd->cmnd[0] == REQUEST_SENSE || (pSRB->SRBFlag & AUTO_REQSENSE)) && - !(pDCB->SyncMode & SYNC_NEGO_DONE)) || scmd->cmnd[0] == INQUIRY)) - try_sync_nego = 1; - else - try_sync_nego = 0; - - pSRB->MsgCnt = 0; - cmd = SEL_W_ATN; - DC390_write8 (ScsiFifo, IDENTIFY(disc_allowed, pDCB->TargetLUN)); - /* Change 99/05/31: Don't use tags when not disconnecting (BUSY) */ - if ((pDCB->SyncMode & EN_TAG_QUEUEING) && disc_allowed && scsi_populate_tag_msg(scmd, tag)) { - DC390_write8(ScsiFifo, tag[0]); - pDCB->TagMask |= 1 << tag[1]; - pSRB->TagNumber = tag[1]; - DC390_write8(ScsiFifo, tag[1]); - DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for SRB %p, block tag %02x\n", pSRB, tag[1])); - cmd = SEL_W_ATN3; - } else { - /* No TagQ */ -//no_tag: - DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for SRB %p, No TagQ\n", disc_allowed ? "" : "o", pSRB)); - } - - pSRB->SRBState = SRB_START_; - - if (try_sync_nego) - { - u8 Sync_Off = pDCB->SyncOffset; - DEBUG0(printk (KERN_INFO "DC390: NEW Sync Nego code triggered (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN)); - pSRB->MsgOutBuf[0] = EXTENDED_MESSAGE; - pSRB->MsgOutBuf[1] = 3; - pSRB->MsgOutBuf[2] = EXTENDED_SDTR; - pSRB->MsgOutBuf[3] = pDCB->NegoPeriod; - if (!(Sync_Off & 0x0f)) Sync_Off = SYNC_NEGO_OFFSET; - pSRB->MsgOutBuf[4] = Sync_Off; - pSRB->MsgCnt = 5; - //pSRB->SRBState = SRB_MSGOUT_; - pSRB->SRBState |= DO_SYNC_NEGO; - cmd = SEL_W_ATN_STOP; - } - - /* Command is written in CommandPhase, if SEL_W_ATN_STOP ... 
*/ - if (cmd != SEL_W_ATN_STOP) - { - if( pSRB->SRBFlag & AUTO_REQSENSE ) - { - DC390_write8 (ScsiFifo, REQUEST_SENSE); - DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE); - DC390_write8 (ScsiFifo, 0); - DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n")); - } - else /* write cmnd to bus */ - { - u8 *ptr; u8 i; - ptr = (u8 *)scmd->cmnd; - for (i = 0; i < scmd->cmd_len; i++) - DC390_write8 (ScsiFifo, *(ptr++)); - } - } - DEBUG0(if (pACB->pActiveDCB) \ - printk (KERN_WARNING "DC390: ActiveDCB != 0\n")); - DEBUG0(if (pDCB->pActiveSRB) \ - printk (KERN_WARNING "DC390: ActiveSRB != 0\n")); - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); - if (DC390_read8 (Scsi_Status) & INTERRUPT) - { - dc390_freetag (pDCB, pSRB); - DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n", - scmd->device->id, (u8)scmd->device->lun)); - pSRB->SRBState = SRB_READY; - //DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pACB->SelLost++; - return 1; - } - DC390_write8 (ScsiCmd, cmd); - pACB->pActiveDCB = pDCB; - pDCB->pActiveSRB = pSRB; - pACB->Connected = 1; - pSRB->ScsiPhase = SCSI_NOP1; - return 0; -} - - -static void __inline__ -dc390_InvalidCmd(struct dc390_acb* pACB) -{ - if (pACB->pActiveDCB->pActiveSRB->SRBState & (SRB_START_ | SRB_MSGOUT)) - DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); -} - - -static irqreturn_t __inline__ -DC390_Interrupt(void *dev_id) -{ - struct dc390_acb *pACB = dev_id; - struct dc390_dcb *pDCB; - struct dc390_srb *pSRB; - u8 sstatus=0; - u8 phase; - void (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *); - u8 istate, istatus; - - sstatus = DC390_read8 (Scsi_Status); - if( !(sstatus & INTERRUPT) ) - return IRQ_NONE; - - DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus)); - - //DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT); - //dstatus = DC390_read8 (DMA_Status); - //DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); - - spin_lock_irq(pACB->pScsiHost->host_lock); - - istate = DC390_read8 (Intern_State); - istatus = DC390_read8 (INT_Status); /* This clears Scsi_Status, Intern_State and INT_Status ! */ - - DEBUG1(printk (KERN_INFO "Istatus(Res,Inv,Dis,Serv,Succ,ReS,SelA,Sel)=%02x,",istatus)); - dc390_laststatus &= ~0x00ffffff; - dc390_laststatus |= /* dstatus<<24 | */ sstatus<<16 | istate<<8 | istatus; - - if (sstatus & ILLEGAL_OP_ERR) - { - printk ("DC390: Illegal Operation detected (%08x)!\n", dc390_laststatus); - dc390_dumpinfo (pACB, pACB->pActiveDCB, pACB->pActiveDCB->pActiveSRB); - } - - else if (istatus & INVALID_CMD) - { - printk ("DC390: Invalid Command detected (%08x)!\n", dc390_laststatus); - dc390_InvalidCmd( pACB ); - goto unlock; - } - - if (istatus & SCSI_RESET) - { - dc390_ScsiRstDetect( pACB ); - goto unlock; - } - - if (istatus & DISCONNECTED) - { - dc390_Disconnect( pACB ); - goto unlock; - } - - if (istatus & RESELECTED) - { - dc390_Reselect( pACB ); - goto unlock; - } - - else if (istatus & (SELECTED | SEL_ATTENTION)) - { - printk (KERN_ERR "DC390: Target mode not supported!\n"); - goto unlock; - } - - if (istatus & (SUCCESSFUL_OP|SERVICE_REQUEST) ) - { - pDCB = pACB->pActiveDCB; - if (!pDCB) - { - printk (KERN_ERR "DC390: Suc. op/ Serv. 
req: pActiveDCB = 0!\n"); - goto unlock; - } - pSRB = pDCB->pActiveSRB; - if( pDCB->DCBFlag & ABORT_DEV_ ) - dc390_EnableMsgOut_Abort (pACB, pSRB); - - phase = pSRB->ScsiPhase; - DEBUG1(printk (KERN_INFO "DC390: [%i]%s(0) (%02x)\n", phase, dc390_p0_str[phase], sstatus)); - stateV = (void *) dc390_phase0[phase]; - ( *stateV )( pACB, pSRB, &sstatus ); - - pSRB->ScsiPhase = sstatus & 7; - phase = (u8) sstatus & 7; - DEBUG1(printk (KERN_INFO "DC390: [%i]%s(1) (%02x)\n", phase, dc390_p1_str[phase], sstatus)); - stateV = (void *) dc390_phase1[phase]; - ( *stateV )( pACB, pSRB, &sstatus ); - } - - unlock: - spin_unlock_irq(pACB->pScsiHost->host_lock); - return IRQ_HANDLED; -} - -static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id) -{ - irqreturn_t ret; - DEBUG1(printk (KERN_INFO "DC390: Irq (%i) caught: ", irq)); - /* Locking is done in DC390_Interrupt */ - ret = DC390_Interrupt(dev_id); - DEBUG1(printk (".. IRQ returned\n")); - return ret; -} - -static void -dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - u8 sstatus; - u32 ResidCnt; - u8 dstate = 0; - - sstatus = *psstatus; - - if( !(pSRB->SRBState & SRB_XFERPAD) ) - { - if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR) ) - pSRB->SRBStatus |= PARITY_ERROR; - - if( sstatus & COUNT_2_ZERO ) - { - unsigned long timeout = jiffies + HZ; - - /* Function called from the ISR with the host_lock held and interrupts disabled */ - if (pSRB->SGToBeXferLen) - while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) { - spin_unlock_irq(pACB->pScsiHost->host_lock); - udelay(50); - spin_lock_irq(pACB->pScsiHost->host_lock); - } - if (!time_before(jiffies, timeout)) - printk (KERN_CRIT "DC390: Deadlock in DataOut_0: DMA aborted unfinished: %06x bytes remain!!\n", - DC390_read32 (DMA_Wk_ByteCntr)); - dc390_laststatus &= ~0xff000000; - dc390_laststatus |= dstate << 24; - pSRB->TotalXferredLen += pSRB->SGToBeXferLen; - pSRB->SGIndex++; - if( pSRB->SGIndex < pSRB->SGcount ) - { - pSRB->pSegmentList++; - - dc390_start_segment(pSRB); - } - else - pSRB->SGToBeXferLen = 0; - } - else - { - ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) + - (((u32) DC390_read8 (CtcReg_High) << 16) | - ((u32) DC390_read8 (CtcReg_Mid) << 8) | - (u32) DC390_read8 (CtcReg_Low)); - - dc390_advance_segment(pSRB, ResidCnt); - } - } - if ((*psstatus & 7) != SCSI_DATA_OUT) - { - DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - } -} - -static void -dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - u8 sstatus, residual, bval; - u32 ResidCnt, i; - unsigned long xferCnt; - - sstatus = *psstatus; - - if( !(pSRB->SRBState & SRB_XFERPAD) ) - { - if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR)) - pSRB->SRBStatus |= PARITY_ERROR; - - if( sstatus & COUNT_2_ZERO ) - { - int dstate = 0; - unsigned long timeout = jiffies + HZ; - - /* Function called from the ISR with the host_lock held and interrupts disabled */ - if (pSRB->SGToBeXferLen) - while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) { - spin_unlock_irq(pACB->pScsiHost->host_lock); - udelay(50); - spin_lock_irq(pACB->pScsiHost->host_lock); - } - if (!time_before(jiffies, timeout)) { - printk (KERN_CRIT "DC390: Deadlock in DataIn_0: DMA aborted unfinished: %06x bytes remain!!\n", - DC390_read32 (DMA_Wk_ByteCntr)); - printk (KERN_CRIT "DC390: DataIn_0: DMA State: %i\n", dstate); - } - dc390_laststatus &= ~0xff000000; - dc390_laststatus |= dstate << 24; - 
DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16) \ - + ((unsigned long) DC390_read8 (CtcReg_Mid) << 8) \ - + ((unsigned long) DC390_read8 (CtcReg_Low))); - DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen)); - - DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); - - pSRB->TotalXferredLen += pSRB->SGToBeXferLen; - pSRB->SGIndex++; - if( pSRB->SGIndex < pSRB->SGcount ) - { - pSRB->pSegmentList++; - - dc390_start_segment(pSRB); - } - else - pSRB->SGToBeXferLen = 0; - } - else /* phase changed */ - { - residual = 0; - bval = DC390_read8 (Current_Fifo); - while( bval & 0x1f ) - { - DEBUG1(printk (KERN_DEBUG "Check for residuals,")); - if( (bval & 0x1f) == 1 ) - { - for(i=0; i < 0x100; i++) - { - bval = DC390_read8 (Current_Fifo); - if( !(bval & 0x1f) ) - goto din_1; - else if( i == 0x0ff ) - { - residual = 1; /* ;1 residual byte */ - goto din_1; - } - } - } - else - bval = DC390_read8 (Current_Fifo); - } -din_1: - DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_BLAST_CMD); - for (i = 0xa000; i; i--) - { - bval = DC390_read8 (DMA_Status); - if (bval & BLAST_COMPLETE) - break; - } - /* It seems a DMA Blast abort isn't that bad ... */ - if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n"); - //DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); - dc390_laststatus &= ~0xff000000; - dc390_laststatus |= bval << 24; - - DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval)); - ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) | - ((u32) DC390_read8 (CtcReg_Mid) << 8)) | - (u32) DC390_read8 (CtcReg_Low); - - xferCnt = dc390_advance_segment(pSRB, ResidCnt); - - if (residual) { - size_t count = 1; - size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList); - unsigned long flags; - u8 *ptr; - - bval = DC390_read8 (ScsiFifo); /* get one residual byte */ - - local_irq_save(flags); - ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count); - if (likely(ptr)) { - *(ptr + offset) = bval; - scsi_kunmap_atomic_sg(ptr); - } - local_irq_restore(flags); - WARN_ON(!ptr); - - /* 1 more byte read */ - xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1); - } - DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\ - pSRB->TotalXferredLen, pSRB->SGToBeXferLen)); - } - } - if ((*psstatus & 7) != SCSI_DATA_IN) - { - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD); - } -} - -static void -dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ -} - -static void -dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - - pSRB->TargetStatus = DC390_read8 (ScsiFifo); - //udelay (1); - pSRB->EndMessage = DC390_read8 (ScsiFifo); /* get message */ - - *psstatus = SCSI_NOP0; - pSRB->SRBState = SRB_COMPLETED; - DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); -} - -static void -dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) ) - *psstatus = SCSI_NOP0; - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); -} - - -static void __inline__ -dc390_reprog (struct dc390_acb* pACB, struct dc390_dcb* pDCB) -{ - DC390_write8 (Sync_Period, pDCB->SyncPeriod); - DC390_write8 (Sync_Offset, pDCB->SyncOffset); - DC390_write8 (CtrlReg3, pDCB->CtrlR3); - DC390_write8 (CtrlReg4, pDCB->CtrlR4); - dc390_SetXferRate (pACB, pDCB); -} - - -#ifdef DC390_DEBUG0 -static void -dc390_printMsg (u8 *MsgBuf, u8 len) 
-{ - int i; - printk (" %02x", MsgBuf[0]); - for (i = 1; i < len; i++) - printk (" %02x", MsgBuf[i]); - printk ("\n"); -} -#endif - -#define DC390_ENABLE_MSGOUT DC390_write8 (ScsiCmd, SET_ATN_CMD) - -/* reject_msg */ -static void __inline__ -dc390_MsgIn_reject (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - pSRB->MsgOutBuf[0] = MESSAGE_REJECT; - pSRB->MsgCnt = 1; - DC390_ENABLE_MSGOUT; - DEBUG0 (printk (KERN_INFO "DC390: Reject message\n")); -} - -/* abort command */ -static void -dc390_EnableMsgOut_Abort ( struct dc390_acb* pACB, struct dc390_srb* pSRB ) -{ - pSRB->MsgOutBuf[0] = ABORT; - pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT; - pSRB->pSRBDCB->DCBFlag &= ~ABORT_DEV_; -} - -static struct dc390_srb* -dc390_MsgIn_QTag (struct dc390_acb* pACB, struct dc390_dcb* pDCB, s8 tag) -{ - struct dc390_srb* pSRB = pDCB->pGoingSRB; - - if (pSRB) - { - struct scsi_cmnd *scmd = scsi_find_tag(pSRB->pcmd->device, tag); - pSRB = (struct dc390_srb *)scmd->host_scribble; - - if (pDCB->DCBFlag & ABORT_DEV_) - { - pSRB->SRBState = SRB_ABORT_SENT; - dc390_EnableMsgOut_Abort( pACB, pSRB ); - } - - if (!(pSRB->SRBState & SRB_DISCONNECT)) - goto mingx0; - - pDCB->pActiveSRB = pSRB; - pSRB->SRBState = SRB_DATA_XFER; - } - else - { - mingx0: - pSRB = pACB->pTmpSRB; - pSRB->SRBState = SRB_UNEXPECT_RESEL; - pDCB->pActiveSRB = pSRB; - pSRB->MsgOutBuf[0] = ABORT_TAG; - pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT; - } - return pSRB; -} - - -/* set async transfer mode */ -static void -dc390_MsgIn_set_async (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - struct dc390_dcb* pDCB = pSRB->pSRBDCB; - if (!(pSRB->SRBState & DO_SYNC_NEGO)) - printk (KERN_INFO "DC390: Target %i initiates Non-Sync?\n", pDCB->TargetID); - pSRB->SRBState &= ~DO_SYNC_NEGO; - pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE); - pDCB->SyncPeriod = 0; - pDCB->SyncOffset = 0; - //pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */ - pDCB->CtrlR3 = FAST_CLK; /* fast clock / normal scsi */ - pDCB->CtrlR4 &= 0x3f; - pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */ - dc390_reprog (pACB, pDCB); -} - -/* set sync transfer mode */ -static void -dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - u8 bval; - u16 wval, wval1; - struct dc390_dcb* pDCB = pSRB->pSRBDCB; - u8 oldsyncperiod = pDCB->SyncPeriod; - u8 oldsyncoffset = pDCB->SyncOffset; - - if (!(pSRB->SRBState & DO_SYNC_NEGO)) - { - printk (KERN_INFO "DC390: Target %i initiates Sync: %ins %i ... 
answer ...\n", - pDCB->TargetID, pSRB->MsgInBuf[3]<<2, pSRB->MsgInBuf[4]); - - /* reject */ - //dc390_MsgIn_reject (pACB, pSRB); - //return dc390_MsgIn_set_async (pACB, pSRB); - - /* Reply with corrected SDTR Message */ - if (pSRB->MsgInBuf[4] > 15) - { - printk (KERN_INFO "DC390: Lower Sync Offset to 15\n"); - pSRB->MsgInBuf[4] = 15; - } - if (pSRB->MsgInBuf[3] < pDCB->NegoPeriod) - { - printk (KERN_INFO "DC390: Set sync nego period to %ins\n", pDCB->NegoPeriod << 2); - pSRB->MsgInBuf[3] = pDCB->NegoPeriod; - } - memcpy (pSRB->MsgOutBuf, pSRB->MsgInBuf, 5); - pSRB->MsgCnt = 5; - DC390_ENABLE_MSGOUT; - } - - pSRB->SRBState &= ~DO_SYNC_NEGO; - pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE; - pDCB->SyncOffset &= 0x0f0; - pDCB->SyncOffset |= pSRB->MsgInBuf[4]; - pDCB->NegoPeriod = pSRB->MsgInBuf[3]; - - wval = (u16) pSRB->MsgInBuf[3]; - wval = wval << 2; wval -= 3; wval1 = wval / 25; /* compute speed */ - if( (wval1 * 25) != wval) wval1++; - bval = FAST_CLK+FAST_SCSI; /* fast clock / fast scsi */ - - pDCB->CtrlR4 &= 0x3f; /* Glitch eater: 12ns less than normal */ - if (pACB->glitch_cfg != NS_TO_GLITCH(0)) - pDCB->CtrlR4 |= NS_TO_GLITCH(((GLITCH_TO_NS(pACB->glitch_cfg)) - 1)); - else - pDCB->CtrlR4 |= NS_TO_GLITCH(0); - if (wval1 < 4) pDCB->CtrlR4 |= NS_TO_GLITCH(0); /* Ultra */ - - if (wval1 >= 8) - { - wval1--; /* Timing computation differs by 1 from FAST_SCSI */ - bval = FAST_CLK; /* fast clock / normal scsi */ - pDCB->CtrlR4 |= pACB->glitch_cfg; /* glitch eater */ - } - - pDCB->CtrlR3 = bval; - pDCB->SyncPeriod = (u8)wval1; - - if ((oldsyncperiod != wval1 || oldsyncoffset != pDCB->SyncOffset) && pDCB->TargetLUN == 0) - { - if (! (bval & FAST_SCSI)) wval1++; - printk (KERN_INFO "DC390: Target %i: Sync transfer %i.%1i MHz, Offset %i\n", pDCB->TargetID, - 40/wval1, ((40%wval1)*10+wval1/2)/wval1, pDCB->SyncOffset & 0x0f); - } - - dc390_reprog (pACB, pDCB); -} - - -/* handle RESTORE_PTR */ -/* This doesn't look very healthy... to-be-fixed */ -static void -dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB) -{ - struct scsi_cmnd *pcmd = pSRB->pcmd; - struct scatterlist *psgl; - pSRB->TotalXferredLen = 0; - pSRB->SGIndex = 0; - if (scsi_sg_count(pcmd)) { - size_t saved; - pSRB->pSegmentList = scsi_sglist(pcmd); - psgl = pSRB->pSegmentList; - //dc390_pci_sync(pSRB); - - while (pSRB->TotalXferredLen + (unsigned long) sg_dma_len(psgl) < pSRB->Saved_Ptr) - { - pSRB->TotalXferredLen += (unsigned long) sg_dma_len(psgl); - pSRB->SGIndex++; - if( pSRB->SGIndex < pSRB->SGcount ) - { - pSRB->pSegmentList++; - - dc390_start_segment(pSRB); - } - else - pSRB->SGToBeXferLen = 0; - } - - saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen; - pSRB->SGToBeXferLen -= saved; - pSRB->SGBusAddr += saved; - printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n", - pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr); - - } else { - pSRB->SGcount = 0; - printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n"); - } - - pSRB->TotalXferredLen = pSRB->Saved_Ptr; -} - - -/* According to the docs, the AM53C974 reads the message and - * generates a Successful Operation IRQ before asserting ACK for - * the last byte (how does it know whether it's the last ?) */ -/* The old code handled it in another way, indicating, that on - * every message byte an IRQ is generated and every byte has to - * be manually ACKed. Hmmm ? (KG, 98/11/28) */ -/* The old implementation was correct. Sigh! 
*/ - -/* Check if the message is complete */ -static u8 __inline__ -dc390_MsgIn_complete (u8 *msgbuf, u32 len) -{ - if (*msgbuf == EXTENDED_MESSAGE) - { - if (len < 2) return 0; - if (len < msgbuf[1] + 2) return 0; - } - else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) // two byte messages - if (len < 2) return 0; - return 1; -} - - - -/* read and eval received messages */ -static void -dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - struct dc390_dcb* pDCB = pACB->pActiveDCB; - - /* Read the msg */ - - pSRB->MsgInBuf[pACB->MsgLen++] = DC390_read8 (ScsiFifo); - //pSRB->SRBState = 0; - - /* Msg complete ? */ - if (dc390_MsgIn_complete (pSRB->MsgInBuf, pACB->MsgLen)) - { - DEBUG0 (printk (KERN_INFO "DC390: MsgIn:"); dc390_printMsg (pSRB->MsgInBuf, pACB->MsgLen)); - /* Now eval the msg */ - switch (pSRB->MsgInBuf[0]) - { - case DISCONNECT: - pSRB->SRBState = SRB_DISCONNECT; break; - - case SIMPLE_QUEUE_TAG: - case HEAD_OF_QUEUE_TAG: - case ORDERED_QUEUE_TAG: - pSRB = dc390_MsgIn_QTag (pACB, pDCB, pSRB->MsgInBuf[1]); - break; - - case MESSAGE_REJECT: - DC390_write8 (ScsiCmd, RESET_ATN_CMD); - pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */ - if( pSRB->SRBState & DO_SYNC_NEGO) - dc390_MsgIn_set_async (pACB, pSRB); - break; - - case EXTENDED_MESSAGE: - /* reject every extended msg but SDTR */ - if (pSRB->MsgInBuf[1] != 3 || pSRB->MsgInBuf[2] != EXTENDED_SDTR) - dc390_MsgIn_reject (pACB, pSRB); - else - { - if (pSRB->MsgInBuf[3] == 0 || pSRB->MsgInBuf[4] == 0) - dc390_MsgIn_set_async (pACB, pSRB); - else - dc390_MsgIn_set_sync (pACB, pSRB); - } - - // nothing has to be done - case COMMAND_COMPLETE: break; - - // SAVE POINTER may be ignored as we have the struct dc390_srb* associated with the - // scsi command. Thanks, Gerard, for pointing it out. - case SAVE_POINTERS: - pSRB->Saved_Ptr = pSRB->TotalXferredLen; - break; - // The device might want to restart transfer with a RESTORE - case RESTORE_POINTERS: - DEBUG0(printk ("DC390: RESTORE POINTER message received ... try to handle\n")); - dc390_restore_ptr (pACB, pSRB); - break; - - // reject unknown messages - default: dc390_MsgIn_reject (pACB, pSRB); - } - - /* Clear counter and MsgIn state */ - pSRB->SRBState &= ~SRB_MSGIN; - pACB->MsgLen = 0; - } - - *psstatus = SCSI_NOP0; - DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); -} - - -static void -dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir) -{ - unsigned long lval; - struct dc390_dcb* pDCB = pACB->pActiveDCB; - - if (pSRB == pACB->pTmpSRB) - { - if (pDCB) - printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (%02i-%i)\n", pDCB->TargetID, pDCB->TargetLUN); - else - printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) 
(DCB 0!)\n"); - - /* Try to recover - some broken disks react badly to tagged INQUIRY */ - if (pDCB && pACB->scan_devices && pDCB->GoingSRBCnt == 1) { - pSRB = pDCB->pGoingSRB; - pDCB->pActiveSRB = pSRB; - } else { - pSRB->pSRBDCB = pDCB; - dc390_EnableMsgOut_Abort(pACB, pSRB); - if (pDCB) - pDCB->DCBFlag |= ABORT_DEV; - return; - } - } - - if( pSRB->SGIndex < pSRB->SGcount ) - { - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); - if( !pSRB->SGToBeXferLen ) - { - dc390_start_segment(pSRB); - - DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment.")); - } - lval = pSRB->SGToBeXferLen; - DEBUG1(printk (KERN_DEBUG " DC390: Start transfer: %li bytes (address %08lx)\n", lval, pSRB->SGBusAddr)); - DC390_write8 (CtcReg_Low, (u8) lval); - lval >>= 8; - DC390_write8 (CtcReg_Mid, (u8) lval); - lval >>= 8; - DC390_write8 (CtcReg_High, (u8) lval); - - DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen); - DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr); - - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); - pSRB->SRBState = SRB_DATA_XFER; - - DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD); - - DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir); - //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT)); - //DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status))); - //DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT)); - } - else /* xfer pad */ - { - if( pSRB->SGcount ) - { - pSRB->AdaptStatus = H_OVER_UNDER_RUN; - pSRB->SRBStatus |= OVER_RUN; - DEBUG0(printk (KERN_WARNING " DC390: Overrun -")); - } - DEBUG0(printk (KERN_WARNING " Clear transfer pad \n")); - DC390_write8 (CtcReg_Low, 0); - DC390_write8 (CtcReg_Mid, 0); - DC390_write8 (CtcReg_High, 0); - - pSRB->SRBState |= SRB_XFERPAD; - DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE); -/* - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir); - DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir); -*/ - } -} - - -static void -dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - dc390_DataIO_Comm (pACB, pSRB, WRITE_DIRECTION); -} - -static void -dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - dc390_DataIO_Comm (pACB, pSRB, READ_DIRECTION); -} - -static void -dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - struct dc390_dcb* pDCB; - u8 i, cnt; - u8 *ptr; - - DC390_write8 (ScsiCmd, RESET_ATN_CMD); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - if( !(pSRB->SRBFlag & AUTO_REQSENSE) ) - { - cnt = (u8) pSRB->pcmd->cmd_len; - ptr = (u8 *) pSRB->pcmd->cmnd; - for(i=0; i < cnt; i++) - DC390_write8 (ScsiFifo, *(ptr++)); - } - else - { - DC390_write8 (ScsiFifo, REQUEST_SENSE); - pDCB = pACB->pActiveDCB; - DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, 0); - DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE); - DC390_write8 (ScsiFifo, 0); - DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n")); - } - pSRB->SRBState = SRB_COMMAND; - DC390_write8 (ScsiCmd, INFO_XFER_CMD); -} - -static void -dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pSRB->SRBState = SRB_STATUS; - DC390_write8 (ScsiCmd, INITIATOR_CMD_CMPLTE); - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); -} - -static void -dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - u8 bval, i, cnt; - u8 *ptr; - struct dc390_dcb* pDCB; - - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pDCB = pACB->pActiveDCB; - 
if( !(pSRB->SRBState & SRB_MSGOUT) ) - { - cnt = pSRB->MsgCnt; - if( cnt ) - { - ptr = (u8 *) pSRB->MsgOutBuf; - for(i=0; i < cnt; i++) - DC390_write8 (ScsiFifo, *(ptr++)); - pSRB->MsgCnt = 0; - if( (pDCB->DCBFlag & ABORT_DEV_) && - (pSRB->MsgOutBuf[0] == ABORT) ) - pSRB->SRBState = SRB_ABORT_SENT; - } - else - { - bval = ABORT; /* ??? MSG_NOP */ - if( (pSRB->pcmd->cmnd[0] == INQUIRY ) || - (pSRB->pcmd->cmnd[0] == REQUEST_SENSE) || - (pSRB->SRBFlag & AUTO_REQSENSE) ) - { - if( pDCB->SyncMode & SYNC_ENABLE ) - goto mop1; - } - DC390_write8 (ScsiFifo, bval); - } - DC390_write8 (ScsiCmd, INFO_XFER_CMD); - } - else - { -mop1: - printk (KERN_ERR "DC390: OLD Sync Nego code triggered! (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN); - DC390_write8 (ScsiFifo, EXTENDED_MESSAGE); - DC390_write8 (ScsiFifo, 3); /* ;length of extended msg */ - DC390_write8 (ScsiFifo, EXTENDED_SDTR); /* ; sync nego */ - DC390_write8 (ScsiFifo, pDCB->NegoPeriod); - if (pDCB->SyncOffset & 0x0f) - DC390_write8 (ScsiFifo, pDCB->SyncOffset); - else - DC390_write8 (ScsiFifo, SYNC_NEGO_OFFSET); - pSRB->SRBState |= DO_SYNC_NEGO; - DC390_write8 (ScsiCmd, INFO_XFER_CMD); - } -} - -static void -dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - if( !(pSRB->SRBState & SRB_MSGIN) ) - { - pSRB->SRBState &= ~SRB_DISCONNECT; - pSRB->SRBState |= SRB_MSGIN; - } - DC390_write8 (ScsiCmd, INFO_XFER_CMD); - //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); -} - -static void -dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ -} - -static void -dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus) -{ -} - - -static void -dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB ) -{ - u8 bval, i, cnt; - struct dc390_dcb* ptr; - - if( !(pDCB->TargetLUN) ) - { - if( !pACB->scan_devices ) - { - ptr = pACB->pLinkDCB; - cnt = pACB->DCBCnt; - bval = pDCB->TargetID; - for(i=0; i<cnt; i++) - { - if( ptr->TargetID == bval ) - { - ptr->SyncPeriod = pDCB->SyncPeriod; - ptr->SyncOffset = pDCB->SyncOffset; - ptr->CtrlR3 = pDCB->CtrlR3; - ptr->CtrlR4 = pDCB->CtrlR4; - ptr->SyncMode = pDCB->SyncMode; - } - ptr = ptr->pNextDCB; - } - } - } - return; -} - - -static void -dc390_Disconnect( struct dc390_acb* pACB ) -{ - struct dc390_dcb *pDCB; - struct dc390_srb *pSRB, *psrb; - u8 i, cnt; - - DEBUG0(printk(KERN_INFO "DISC,")); - - if (!pACB->Connected) printk(KERN_ERR "DC390: Disconnect not-connected bus?\n"); - pACB->Connected = 0; - pDCB = pACB->pActiveDCB; - if (!pDCB) - { - DEBUG0(printk(KERN_ERR "ACB:%p->ActiveDCB:%p IOPort:%04x IRQ:%02x !\n",\ - pACB, pDCB, pACB->IOPortBase, pACB->IRQLevel)); - mdelay(400); - DC390_read8 (INT_Status); /* Reset Pending INT */ - DC390_write8 (ScsiCmd, EN_SEL_RESEL); - return; - } - DC390_write8 (ScsiCmd, EN_SEL_RESEL); - pSRB = pDCB->pActiveSRB; - pACB->pActiveDCB = NULL; - pSRB->ScsiPhase = SCSI_NOP0; - if( pSRB->SRBState & SRB_UNEXPECT_RESEL ) - pSRB->SRBState = 0; - else if( pSRB->SRBState & SRB_ABORT_SENT ) - { - pDCB->TagMask = 0; - pDCB->DCBFlag = 0; - cnt = pDCB->GoingSRBCnt; - pDCB->GoingSRBCnt = 0; - pSRB = pDCB->pGoingSRB; - for( i=0; i < cnt; i++) - { - psrb = pSRB->pNextSRB; - dc390_Free_insert (pACB, pSRB); - pSRB = psrb; - } - pDCB->pGoingSRB = NULL; - } - else - { - if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) || - !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) ) - { /* Selection time out */ - pSRB->AdaptStatus = H_SEL_TIMEOUT; - pSRB->TargetStatus = 0; - goto disc1; - } - else if 
(!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED)) - { -disc1: - dc390_freetag (pDCB, pSRB); - pDCB->pActiveSRB = NULL; - pSRB->SRBState = SRB_FREE; - dc390_SRBdone( pACB, pDCB, pSRB); - } - } - pACB->MsgLen = 0; -} - - -static void -dc390_Reselect( struct dc390_acb* pACB ) -{ - struct dc390_dcb* pDCB; - struct dc390_srb* pSRB; - u8 id, lun; - - DEBUG0(printk(KERN_INFO "RSEL,")); - pACB->Connected = 1; - pDCB = pACB->pActiveDCB; - if( pDCB ) - { /* Arbitration lost but Reselection won */ - DEBUG0(printk ("DC390: (ActiveDCB != 0: Arb. lost but resel. won)!\n")); - pSRB = pDCB->pActiveSRB; - if( !( pACB->scan_devices ) ) - { - struct scsi_cmnd *pcmd = pSRB->pcmd; - scsi_set_resid(pcmd, scsi_bufflen(pcmd)); - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - dc390_Going_remove(pDCB, pSRB); - dc390_Free_insert(pACB, pSRB); - pcmd->scsi_done (pcmd); - DEBUG0(printk(KERN_DEBUG"DC390: Return SRB %p to free\n", pSRB)); - } - } - /* Get ID */ - lun = DC390_read8 (ScsiFifo); - DEBUG0(printk ("Dev %02x,", lun)); - if (!(lun & (1 << pACB->pScsiHost->this_id))) - printk (KERN_ERR "DC390: Reselection must select host adapter: %02x!\n", lun); - else - lun ^= 1 << pACB->pScsiHost->this_id; /* Mask AdapterID */ - id = 0; while (lun >>= 1) id++; - /* Get LUN */ - lun = DC390_read8 (ScsiFifo); - if (!(lun & IDENTIFY_BASE)) printk (KERN_ERR "DC390: Resel: Expect identify message!\n"); - lun &= 7; - DEBUG0(printk ("(%02i-%i),", id, lun)); - pDCB = dc390_findDCB (pACB, id, lun); - if (!pDCB) - { - printk (KERN_ERR "DC390: Reselect from non existing device (%02i-%i)\n", - id, lun); - return; - } - pACB->pActiveDCB = pDCB; - /* TagQ: We expect a message soon, so never mind the exact SRB */ - if( pDCB->SyncMode & EN_TAG_QUEUEING ) - { - pSRB = pACB->pTmpSRB; - pDCB->pActiveSRB = pSRB; - } - else - { - pSRB = pDCB->pActiveSRB; - if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) ) - { - pSRB= pACB->pTmpSRB; - pSRB->SRBState = SRB_UNEXPECT_RESEL; - printk (KERN_ERR "DC390: Reselect without outstanding cmnd (%02i-%i)\n", - id, lun); - pDCB->pActiveSRB = pSRB; - dc390_EnableMsgOut_Abort ( pACB, pSRB ); - } - else - { - if( pDCB->DCBFlag & ABORT_DEV_ ) - { - pSRB->SRBState = SRB_ABORT_SENT; - printk (KERN_INFO "DC390: Reselect: Abort (%02i-%i)\n", - id, lun); - dc390_EnableMsgOut_Abort( pACB, pSRB ); - } - else - pSRB->SRBState = SRB_DATA_XFER; - } - } - - DEBUG1(printk (KERN_DEBUG "Resel SRB(%p): TagNum (%02x)\n", pSRB, pSRB->TagNumber)); - pSRB->ScsiPhase = SCSI_NOP0; - DC390_write8 (Scsi_Dest_ID, pDCB->TargetID); - DC390_write8 (Sync_Period, pDCB->SyncPeriod); - DC390_write8 (Sync_Offset, pDCB->SyncOffset); - DC390_write8 (CtrlReg1, pDCB->CtrlR1); - DC390_write8 (CtrlReg3, pDCB->CtrlR3); - DC390_write8 (CtrlReg4, pDCB->CtrlR4); /* ; Glitch eater */ - DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); /* ;to release the /ACK signal */ -} - -static int __inline__ -dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - struct scsi_cmnd *pcmd; - - pcmd = pSRB->pcmd; - - REMOVABLEDEBUG(printk(KERN_INFO "DC390: RequestSense(Cmd %02x, Id %02x, LUN %02x)\n",\ - pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN)); - - pSRB->SRBFlag |= AUTO_REQSENSE; - pSRB->SavedTotXLen = pSRB->TotalXferredLen; - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */ - - /* We are called from SRBdone, original PCI mapping has been removed - * already, new one is set up from StartSCSI */ - pSRB->SGIndex = 0; - - pSRB->TotalXferredLen = 0; - pSRB->SGToBeXferLen = 0; - return 
dc390_StartSCSI(pACB, pDCB, pSRB); -} - - -static void -dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB ) -{ - u8 status; - struct scsi_cmnd *pcmd; - - pcmd = pSRB->pcmd; - /* KG: Moved pci_unmap here */ - dc390_pci_unmap(pSRB); - - status = pSRB->TargetStatus; - - DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p\n", status, pcmd->result, pSRB)); - if(pSRB->SRBFlag & AUTO_REQSENSE) - { /* Last command was a Request Sense */ - pSRB->SRBFlag &= ~AUTO_REQSENSE; - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION; - - //pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status); - if (status == SAM_STAT_CHECK_CONDITION) - pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0); - else /* Retry */ - { - if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */) - { - /* Don't retry on TEST_UNIT_READY */ - pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION); - REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\ - (u32) pcmd->result, (u32) pSRB->TotalXferredLen)); - } else { - SET_RES_DRV(pcmd->result, DRIVER_SENSE); - //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); - DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun)); - pSRB->TotalXferredLen = 0; - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - } - } - goto cmd_done; - } - if( status ) - { - if (status == SAM_STAT_CHECK_CONDITION) - { - if (dc390_RequestSense(pACB, pDCB, pSRB)) { - SET_RES_DID(pcmd->result, DID_ERROR); - goto cmd_done; - } - return; - } - else if (status == SAM_STAT_TASK_SET_FULL) - { - scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); - DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun)); - pSRB->TotalXferredLen = 0; - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - } - else if (status == SAM_STAT_BUSY && - (pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) && - pACB->scan_devices) - { - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = status; - pcmd->result = MK_RES(0,0,pSRB->EndMessage,/*status*/0); - } - else - { /* Another error */ - pSRB->TotalXferredLen = 0; - SET_RES_DID(pcmd->result, DID_SOFT_ERROR); - goto cmd_done; - } - } - else - { /* Target status == 0 */ - status = pSRB->AdaptStatus; - if (status == H_OVER_UNDER_RUN) - { - pSRB->TargetStatus = 0; - SET_RES_DID(pcmd->result,DID_OK); - SET_RES_MSG(pcmd->result,pSRB->EndMessage); - } - else if (status == H_SEL_TIMEOUT) - { - pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0); - /* Devices are removed below ... 
*/ - } - else if( pSRB->SRBStatus & PARITY_ERROR) - { - //pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0); - SET_RES_DID(pcmd->result,DID_PARITY); - SET_RES_MSG(pcmd->result,pSRB->EndMessage); - } - else /* No error */ - { - pSRB->AdaptStatus = 0; - pSRB->TargetStatus = 0; - SET_RES_DID(pcmd->result,DID_OK); - } - } - -cmd_done: - scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen); - - dc390_Going_remove (pDCB, pSRB); - /* Add to free list */ - dc390_Free_insert (pACB, pSRB); - - DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done\n")); - pcmd->scsi_done (pcmd); - - return; -} - - -/* Remove all SRBs from Going list and inform midlevel */ -static void -dc390_DoingSRB_Done(struct dc390_acb* pACB, struct scsi_cmnd *cmd) -{ - struct dc390_dcb *pDCB, *pdcb; - struct dc390_srb *psrb, *psrb2; - int i; - struct scsi_cmnd *pcmd; - - pDCB = pACB->pLinkDCB; - pdcb = pDCB; - if (! pdcb) return; - do - { - psrb = pdcb->pGoingSRB; - for (i = 0; i < pdcb->GoingSRBCnt; i++) - { - psrb2 = psrb->pNextSRB; - pcmd = psrb->pcmd; - dc390_Free_insert (pACB, psrb); - psrb = psrb2; - } - pdcb->GoingSRBCnt = 0; - pdcb->pGoingSRB = NULL; - pdcb->TagMask = 0; - pdcb = pdcb->pNextDCB; - } while( pdcb != pDCB ); -} - - -static void -dc390_ResetSCSIBus( struct dc390_acb* pACB ) -{ - //DC390_write8 (ScsiCmd, RST_DEVICE_CMD); - //udelay (250); - //DC390_write8 (ScsiCmd, NOP_CMD); - - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); - DC390_write8 (ScsiCmd, RST_SCSI_BUS_CMD); - pACB->Connected = 0; - - return; -} - -static void -dc390_ScsiRstDetect( struct dc390_acb* pACB ) -{ - printk ("DC390: Rst_Detect: laststat = %08x\n", dc390_laststatus); - //DEBUG0(printk(KERN_INFO "RST_DETECT,")); - - DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); - /* Unlock before ? 
*/ - /* delay half a second */ - udelay (1000); - DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pACB->last_reset = jiffies + 5*HZ/2 - + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; - pACB->Connected = 0; - - if( pACB->ACBFlag & RESET_DEV ) - pACB->ACBFlag |= RESET_DONE; - else - { /* Reset was issued by sb else */ - pACB->ACBFlag |= RESET_DETECT; - - dc390_ResetDevParam( pACB ); - dc390_DoingSRB_Done( pACB, NULL); - //dc390_RecoverSRB( pACB ); - pACB->pActiveDCB = NULL; - pACB->ACBFlag = 0; - } - return; -} - -static int DC390_queuecommand_lck(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) -{ - struct scsi_device *sdev = cmd->device; - struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata; - struct dc390_dcb *dcb = sdev->hostdata; - struct dc390_srb *srb; - - if (sdev->queue_depth <= dcb->GoingSRBCnt) - goto device_busy; - if (acb->pActiveDCB) - goto host_busy; - if (acb->ACBFlag & (RESET_DETECT|RESET_DONE|RESET_DEV)) - goto host_busy; - - srb = acb->pFreeSRB; - if (unlikely(srb == NULL)) - goto host_busy; - - cmd->scsi_done = done; - cmd->result = 0; - acb->Cmds++; - - acb->pFreeSRB = srb->pNextSRB; - srb->pNextSRB = NULL; - - srb->pSRBDCB = dcb; - srb->pcmd = cmd; - cmd->host_scribble = (char *)srb; - - srb->SGIndex = 0; - srb->AdaptStatus = 0; - srb->TargetStatus = 0; - srb->MsgCnt = 0; - - srb->SRBStatus = 0; - srb->SRBFlag = 0; - srb->SRBState = 0; - srb->TotalXferredLen = 0; - srb->SGBusAddr = 0; - srb->SGToBeXferLen = 0; - srb->ScsiPhase = 0; - srb->EndMessage = 0; - srb->TagNumber = SCSI_NO_TAG; - - if (dc390_StartSCSI(acb, dcb, srb)) { - dc390_Free_insert(acb, srb); - goto host_busy; - } - - dc390_Going_append(dcb, srb); - - return 0; - - host_busy: - return SCSI_MLQUEUE_HOST_BUSY; - - device_busy: - return SCSI_MLQUEUE_DEVICE_BUSY; -} - -static DEF_SCSI_QCMD(DC390_queuecommand) - -static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) -{ - struct pci_dev *pdev; - u16 pstat; - - if (!pDCB) pDCB = pACB->pActiveDCB; - if (!pSRB && pDCB) pSRB = pDCB->pActiveSRB; - - if (pSRB) - { - printk ("DC390: SRB: Xferred %08lx, Remain %08lx, State %08x, Phase %02x\n", - pSRB->TotalXferredLen, pSRB->SGToBeXferLen, pSRB->SRBState, - pSRB->ScsiPhase); - printk ("DC390: AdpaterStatus: %02x, SRB Status %02x\n", pSRB->AdaptStatus, pSRB->SRBStatus); - } - printk ("DC390: Status of last IRQ (DMA/SC/Int/IRQ): %08x\n", dc390_laststatus); - printk ("DC390: Register dump: SCSI block:\n"); - printk ("DC390: XferCnt Cmd Stat IntS IRQS FFIS Ctl1 Ctl2 Ctl3 Ctl4\n"); - printk ("DC390: %06x %02x %02x %02x", - DC390_read8(CtcReg_Low) + (DC390_read8(CtcReg_Mid) << 8) + (DC390_read8(CtcReg_High) << 16), - DC390_read8(ScsiCmd), DC390_read8(Scsi_Status), DC390_read8(Intern_State)); - printk (" %02x %02x %02x %02x %02x %02x\n", - DC390_read8(INT_Status), DC390_read8(Current_Fifo), DC390_read8(CtrlReg1), - DC390_read8(CtrlReg2), DC390_read8(CtrlReg3), DC390_read8(CtrlReg4)); - DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT); - if (DC390_read8(Current_Fifo) & 0x1f) - { - printk ("DC390: FIFO:"); - while (DC390_read8(Current_Fifo) & 0x1f) printk (" %02x", DC390_read8(ScsiFifo)); - printk ("\n"); - } - printk ("DC390: Register dump: DMA engine:\n"); - printk ("DC390: Cmd STrCnt SBusA WrkBC WrkAC Stat SBusCtrl\n"); - printk ("DC390: %02x %08x %08x %08x %08x %02x %08x\n", - DC390_read8(DMA_Cmd), DC390_read32(DMA_XferCnt), DC390_read32(DMA_XferAddr), - DC390_read32(DMA_Wk_ByteCntr), DC390_read32(DMA_Wk_AddrCntr), - 
DC390_read8(DMA_Status), DC390_read32(DMA_ScsiBusCtrl)); - DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); - - pdev = pACB->pdev; - pci_read_config_word(pdev, PCI_STATUS, &pstat); - printk ("DC390: Register dump: PCI Status: %04x\n", pstat); - printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n"); -} - - -static int DC390_abort(struct scsi_cmnd *cmd) -{ - struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata; - struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata; - - scmd_printk(KERN_WARNING, cmd, "DC390: Abort command\n"); - - /* abort() is too stupid for already sent commands at the moment. - * If it's called we are in trouble anyway, so let's dump some info - * into the syslog at least. (KG, 98/08/20,99/06/20) */ - dc390_dumpinfo(pACB, pDCB, NULL); - - pDCB->DCBFlag |= ABORT_DEV_; - printk(KERN_INFO "DC390: Aborted.\n"); - - return FAILED; -} - - -static void dc390_ResetDevParam( struct dc390_acb* pACB ) -{ - struct dc390_dcb *pDCB, *pdcb; - - pDCB = pACB->pLinkDCB; - if (! pDCB) return; - pdcb = pDCB; - do - { - pDCB->SyncMode &= ~SYNC_NEGO_DONE; - pDCB->SyncPeriod = 0; - pDCB->SyncOffset = 0; - pDCB->TagMask = 0; - pDCB->CtrlR3 = FAST_CLK; - pDCB->CtrlR4 &= NEGATE_REQACKDATA | CTRL4_RESERVED | NEGATE_REQACK; - pDCB->CtrlR4 |= pACB->glitch_cfg; - pDCB = pDCB->pNextDCB; - } - while( pdcb != pDCB ); - pACB->ACBFlag &= ~(RESET_DEV | RESET_DONE | RESET_DETECT); - -} - -static int DC390_bus_reset (struct scsi_cmnd *cmd) -{ - struct dc390_acb* pACB = (struct dc390_acb*) cmd->device->host->hostdata; - u8 bval; - - spin_lock_irq(cmd->device->host->host_lock); - - bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST; - DC390_write8(CtrlReg1, bval); /* disable IRQ on bus reset */ - - pACB->ACBFlag |= RESET_DEV; - dc390_ResetSCSIBus(pACB); - - dc390_ResetDevParam(pACB); - mdelay(1); - pACB->last_reset = jiffies + 3*HZ/2 - + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; - - DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); - DC390_read8(INT_Status); /* Reset Pending INT */ - - dc390_DoingSRB_Done(pACB, cmd); - - pACB->pActiveDCB = NULL; - pACB->ACBFlag = 0; - - bval = DC390_read8(CtrlReg1) & ~DIS_INT_ON_SCSI_RST; - DC390_write8(CtrlReg1, bval); /* re-enable interrupt */ - - spin_unlock_irq(cmd->device->host->host_lock); - - return SUCCESS; -} - -/** - * dc390_slave_alloc - Called by the scsi mid layer to tell us about a new - * scsi device that we need to deal with. - * - * @scsi_device: The new scsi device that we need to handle. 
- */ -static int dc390_slave_alloc(struct scsi_device *scsi_device) -{ - struct dc390_acb *pACB = (struct dc390_acb*) scsi_device->host->hostdata; - struct dc390_dcb *pDCB, *pDCB2 = NULL; - uint id = scsi_device->id; - uint lun = scsi_device->lun; - - pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL); - if (!pDCB) - return -ENOMEM; - - if (!pACB->DCBCnt++) { - pACB->pLinkDCB = pDCB; - pACB->pDCBRunRobin = pDCB; - } else { - pACB->pLastDCB->pNextDCB = pDCB; - } - - pDCB->pNextDCB = pACB->pLinkDCB; - pACB->pLastDCB = pDCB; - - pDCB->pDCBACB = pACB; - pDCB->TargetID = id; - pDCB->TargetLUN = lun; - - /* - * Some values are for all LUNs: Copy them - * In a clean way: We would have an own structure for a SCSI-ID - */ - if (lun && (pDCB2 = dc390_findDCB(pACB, id, 0))) { - pDCB->DevMode = pDCB2->DevMode; - pDCB->SyncMode = pDCB2->SyncMode & SYNC_NEGO_DONE; - pDCB->SyncPeriod = pDCB2->SyncPeriod; - pDCB->SyncOffset = pDCB2->SyncOffset; - pDCB->NegoPeriod = pDCB2->NegoPeriod; - - pDCB->CtrlR3 = pDCB2->CtrlR3; - pDCB->CtrlR4 = pDCB2->CtrlR4; - } else { - u8 index = pACB->AdapterIndex; - PEEprom prom = (PEEprom) &dc390_eepromBuf[index][id << 2]; - - pDCB->DevMode = prom->EE_MODE1; - pDCB->NegoPeriod = - (dc390_clock_period1[prom->EE_SPEED] * 25) >> 2; - pDCB->CtrlR3 = FAST_CLK; - pDCB->CtrlR4 = pACB->glitch_cfg | CTRL4_RESERVED; - if (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) - pDCB->CtrlR4 |= NEGATE_REQACKDATA | NEGATE_REQACK; - } - - if (pDCB->DevMode & SYNC_NEGO_) - pDCB->SyncMode |= SYNC_ENABLE; - else { - pDCB->SyncMode = 0; - pDCB->SyncOffset &= ~0x0f; - } - - pDCB->CtrlR1 = pACB->pScsiHost->this_id; - if (pDCB->DevMode & PARITY_CHK_) - pDCB->CtrlR1 |= PARITY_ERR_REPO; - - pACB->scan_devices = 1; - scsi_device->hostdata = pDCB; - return 0; -} - -/** - * dc390_slave_destroy - Called by the scsi mid layer to tell us about a - * device that is going away. - * - * @scsi_device: The scsi device that we need to remove. 
- */ -static void dc390_slave_destroy(struct scsi_device *scsi_device) -{ - struct dc390_acb* pACB = (struct dc390_acb*) scsi_device->host->hostdata; - struct dc390_dcb* pDCB = (struct dc390_dcb*) scsi_device->hostdata; - struct dc390_dcb* pPrevDCB = pACB->pLinkDCB; - - pACB->scan_devices = 0; - - BUG_ON(pDCB->GoingSRBCnt > 1); - - if (pDCB == pACB->pLinkDCB) { - if (pACB->pLastDCB == pDCB) { - pDCB->pNextDCB = NULL; - pACB->pLastDCB = NULL; - } - pACB->pLinkDCB = pDCB->pNextDCB; - } else { - while (pPrevDCB->pNextDCB != pDCB) - pPrevDCB = pPrevDCB->pNextDCB; - pPrevDCB->pNextDCB = pDCB->pNextDCB; - if (pDCB == pACB->pLastDCB) - pACB->pLastDCB = pPrevDCB; - } - - if (pDCB == pACB->pActiveDCB) - pACB->pActiveDCB = NULL; - if (pDCB == pACB->pLinkDCB) - pACB->pLinkDCB = pDCB->pNextDCB; - if (pDCB == pACB->pDCBRunRobin) - pACB->pDCBRunRobin = pDCB->pNextDCB; - kfree(pDCB); - - pACB->DCBCnt--; -} - -static int dc390_slave_configure(struct scsi_device *sdev) -{ - struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata; - struct dc390_dcb *dcb = (struct dc390_dcb *)sdev->hostdata; - - acb->scan_devices = 0; - if (sdev->tagged_supported && (dcb->DevMode & TAG_QUEUEING_)) { - dcb->SyncMode |= EN_TAG_QUEUEING; - scsi_activate_tcq(sdev, acb->TagMaxNum); - } - - return 0; -} - -static struct scsi_host_template driver_template = { - .module = THIS_MODULE, - .proc_name = "tmscsim", - .name = DC390_BANNER " V" DC390_VERSION, - .slave_alloc = dc390_slave_alloc, - .slave_configure = dc390_slave_configure, - .slave_destroy = dc390_slave_destroy, - .queuecommand = DC390_queuecommand, - .eh_abort_handler = DC390_abort, - .eh_bus_reset_handler = DC390_bus_reset, - .can_queue = 1, - .this_id = 7, - .sg_tablesize = SG_ALL, - .cmd_per_lun = 1, - .use_clustering = ENABLE_CLUSTERING, - .max_sectors = 0x4000, /* 8MiB = 16 * 1024 * 512 */ -}; - -/*********************************************************************** - * Functions for access to DC390 EEPROM - * and some to emulate it - * - **********************************************************************/ - -static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd) -{ - u8 carryFlag = 1, j = 0x80, bval; - int i; - - for (i = 0; i < 9; i++) { - if (carryFlag) { - pci_write_config_byte(pdev, 0x80, 0x40); - bval = 0xc0; - } else - bval = 0x80; - - udelay(160); - pci_write_config_byte(pdev, 0x80, bval); - udelay(160); - pci_write_config_byte(pdev, 0x80, 0); - udelay(160); - - carryFlag = (cmd & j) ? 
1 : 0; - j >>= 1; - } -} - -static u16 dc390_eeprom_get_data(struct pci_dev *pdev) -{ - int i; - u16 wval = 0; - u8 bval; - - for (i = 0; i < 16; i++) { - wval <<= 1; - - pci_write_config_byte(pdev, 0x80, 0x80); - udelay(160); - pci_write_config_byte(pdev, 0x80, 0x40); - udelay(160); - pci_read_config_byte(pdev, 0x00, &bval); - - if (bval == 0x22) - wval |= 1; - } - - return wval; -} - -static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr) -{ - u8 cmd = EEPROM_READ, i; - - for (i = 0; i < 0x40; i++) { - pci_write_config_byte(pdev, 0xc0, 0); - udelay(160); - - dc390_eeprom_prepare_read(pdev, cmd++); - *ptr++ = dc390_eeprom_get_data(pdev); - - pci_write_config_byte(pdev, 0x80, 0); - pci_write_config_byte(pdev, 0x80, 0); - udelay(160); - } -} - -/* Override EEprom values with explicitly set values */ -static void dc390_eeprom_override(u8 index) -{ - u8 *ptr = (u8 *) dc390_eepromBuf[index], id; - - /* Adapter Settings */ - if (tmscsim[0] != -2) - ptr[EE_ADAPT_SCSI_ID] = (u8)tmscsim[0]; /* Adapter ID */ - if (tmscsim[3] != -2) - ptr[EE_MODE2] = (u8)tmscsim[3]; - if (tmscsim[5] != -2) - ptr[EE_DELAY] = tmscsim[5]; /* Reset delay */ - if (tmscsim[4] != -2) - ptr[EE_TAG_CMD_NUM] = (u8)tmscsim[4]; /* Tagged Cmds */ - - /* Device Settings */ - for (id = 0; id < MAX_SCSI_ID; id++) { - if (tmscsim[2] != -2) - ptr[id << 2] = (u8)tmscsim[2]; /* EE_MODE1 */ - if (tmscsim[1] != -2) - ptr[(id << 2) + 1] = (u8)tmscsim[1]; /* EE_Speed */ - } -} - -static int tmscsim_def[] = { - 7, - 0 /* 10MHz */, - PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_, - MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION | LUN_CHECK, - 3 /* 16 Tags per LUN */, - 1 /* s delay after Reset */, -}; - -/* Copy defaults over set values where missing */ -static void dc390_fill_with_defaults (void) -{ - int i; - - for (i = 0; i < 6; i++) { - if (tmscsim[i] < 0 || tmscsim[i] > 255) - tmscsim[i] = tmscsim_def[i]; - } - - /* Sanity checks */ - if (tmscsim[0] > 7) - tmscsim[0] = 7; - if (tmscsim[1] > 7) - tmscsim[1] = 4; - if (tmscsim[4] > 5) - tmscsim[4] = 4; - if (tmscsim[5] > 180) - tmscsim[5] = 180; -} - -static void dc390_check_eeprom(struct pci_dev *pdev, u8 index) -{ - u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120}; - u8 EEbuf[128]; - u16 *ptr = (u16 *)EEbuf, wval = 0; - int i; - - dc390_read_eeprom(pdev, ptr); - memcpy(dc390_eepromBuf[index], EEbuf, EE_ADAPT_SCSI_ID); - memcpy(&dc390_eepromBuf[index][EE_ADAPT_SCSI_ID], - &EEbuf[REAL_EE_ADAPT_SCSI_ID], EE_LEN - EE_ADAPT_SCSI_ID); - - dc390_eepromBuf[index][EE_DELAY] = interpd[dc390_eepromBuf[index][EE_DELAY]]; - - for (i = 0; i < 0x40; i++, ptr++) - wval += *ptr; - - /* no Tekram EEprom found */ - if (wval != 0x1234) { - int speed; - - printk(KERN_INFO "DC390_init: No EEPROM found! Trying default settings ...\n"); - - /* - * XXX(hch): bogus, because we might have tekram and - * non-tekram hbas in a single machine. 
- */ - dc390_fill_with_defaults(); - - speed = dc390_clock_speed[tmscsim[1]]; - printk(KERN_INFO "DC390: Used defaults: AdaptID=%i, SpeedIdx=%i (%i.%i MHz), " - "DevMode=0x%02x, AdaptMode=0x%02x, TaggedCmnds=%i (%i), DelayReset=%is\n", - tmscsim[0], tmscsim[1], speed / 10, speed % 10, - (u8)tmscsim[2], (u8)tmscsim[3], tmscsim[4], 2 << (tmscsim[4]), tmscsim[5]); - } -} - -static void dc390_init_hw(struct dc390_acb *pACB, u8 index) -{ - struct Scsi_Host *shost = pACB->pScsiHost; - u8 dstate; - - /* Disable SCSI bus reset interrupt */ - DC390_write8(CtrlReg1, DIS_INT_ON_SCSI_RST | shost->this_id); - - if (pACB->Gmode2 & RST_SCSI_BUS) { - dc390_ResetSCSIBus(pACB); - udelay(1000); - pACB->last_reset = jiffies + HZ/2 + - HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; - } - - pACB->ACBFlag = 0; - - /* Reset Pending INT */ - DC390_read8(INT_Status); - - /* 250ms selection timeout */ - DC390_write8(Scsi_TimeOut, SEL_TIMEOUT); - - /* Conversion factor = 0 , 40MHz clock */ - DC390_write8(Clk_Factor, CLK_FREQ_40MHZ); - - /* NOP cmd - clear command register */ - DC390_write8(ScsiCmd, NOP_CMD); - - /* Enable Feature and SCSI-2 */ - DC390_write8(CtrlReg2, EN_FEATURE+EN_SCSI2_CMD); - - /* Fast clock */ - DC390_write8(CtrlReg3, FAST_CLK); - - /* Negation */ - DC390_write8(CtrlReg4, pACB->glitch_cfg | /* glitch eater */ - (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) ? - NEGATE_REQACKDATA : 0); - - /* Clear Transfer Count High: ID */ - DC390_write8(CtcReg_High, 0); - DC390_write8(DMA_Cmd, DMA_IDLE_CMD); - DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); - DC390_write32(DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); - - dstate = DC390_read8(DMA_Status); - DC390_write8(DMA_Status, dstate); -} - -static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct dc390_acb *pACB; - struct Scsi_Host *shost; - unsigned long io_port; - int error = -ENODEV, i; - - if (pci_enable_device(pdev)) - goto out; - - pci_set_master(pdev); - - error = -ENOMEM; - if (disable_clustering) - driver_template.use_clustering = DISABLE_CLUSTERING; - shost = scsi_host_alloc(&driver_template, sizeof(struct dc390_acb)); - if (!shost) - goto out_disable_device; - - pACB = (struct dc390_acb *)shost->hostdata; - memset(pACB, 0, sizeof(struct dc390_acb)); - - dc390_check_eeprom(pdev, dc390_adapterCnt); - dc390_eeprom_override(dc390_adapterCnt); - - io_port = pci_resource_start(pdev, 0); - - shost->this_id = dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID]; - shost->io_port = io_port; - shost->n_io_port = 0x80; - shost->irq = pdev->irq; - shost->base = io_port; - shost->unique_id = io_port; - - pACB->last_reset = jiffies; - pACB->pScsiHost = shost; - pACB->IOPortBase = (u16) io_port; - pACB->IRQLevel = pdev->irq; - - shost->max_id = 8; - - if (shost->max_id - 1 == - dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID]) - shost->max_id--; - - if (dc390_eepromBuf[dc390_adapterCnt][EE_MODE2] & LUN_CHECK) - shost->max_lun = 8; - else - shost->max_lun = 1; - - pACB->pFreeSRB = pACB->SRB_array; - pACB->SRBCount = MAX_SRB_CNT; - pACB->AdapterIndex = dc390_adapterCnt; - pACB->TagMaxNum = - 2 << dc390_eepromBuf[dc390_adapterCnt][EE_TAG_CMD_NUM]; - pACB->Gmode2 = dc390_eepromBuf[dc390_adapterCnt][EE_MODE2]; - - for (i = 0; i < pACB->SRBCount-1; i++) - pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1]; - pACB->SRB_array[pACB->SRBCount-1].pNextSRB = NULL; - pACB->pTmpSRB = &pACB->TmpSRB; - - pACB->sel_timeout = SEL_TIMEOUT; - pACB->glitch_cfg = EATER_25NS; - pACB->pdev = pdev; - - if (!request_region(io_port, shost->n_io_port, 
"tmscsim")) { - printk(KERN_ERR "DC390: register IO ports error!\n"); - goto out_host_put; - } - - /* Reset Pending INT */ - DC390_read8_(INT_Status, io_port); - - if (request_irq(pdev->irq, do_DC390_Interrupt, IRQF_SHARED, - "tmscsim", pACB)) { - printk(KERN_ERR "DC390: register IRQ error!\n"); - goto out_release_region; - } - - dc390_init_hw(pACB, dc390_adapterCnt); - - dc390_adapterCnt++; - - pci_set_drvdata(pdev, shost); - - error = scsi_add_host(shost, &pdev->dev); - if (error) - goto out_free_irq; - scsi_scan_host(shost); - return 0; - - out_free_irq: - free_irq(pdev->irq, pACB); - out_release_region: - release_region(io_port, shost->n_io_port); - out_host_put: - scsi_host_put(shost); - out_disable_device: - pci_disable_device(pdev); - out: - return error; -} - -/** - * dc390_remove_one - Called to remove a single instance of the adapter. - * - * @dev: The PCI device to remove. - */ -static void dc390_remove_one(struct pci_dev *dev) -{ - struct Scsi_Host *scsi_host = pci_get_drvdata(dev); - unsigned long iflags; - struct dc390_acb* pACB = (struct dc390_acb*) scsi_host->hostdata; - u8 bval; - - scsi_remove_host(scsi_host); - - spin_lock_irqsave(scsi_host->host_lock, iflags); - pACB->ACBFlag = RESET_DEV; - bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST; - DC390_write8 (CtrlReg1, bval); /* disable interrupt */ - if (pACB->Gmode2 & RST_SCSI_BUS) - dc390_ResetSCSIBus(pACB); - spin_unlock_irqrestore(scsi_host->host_lock, iflags); - - free_irq(scsi_host->irq, pACB); - release_region(scsi_host->io_port, scsi_host->n_io_port); - - pci_disable_device(dev); - scsi_host_put(scsi_host); -} - -static struct pci_device_id tmscsim_pci_tbl[] = { - { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD53C974, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, - { } -}; -MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl); - -static struct pci_driver dc390_driver = { - .name = "tmscsim", - .id_table = tmscsim_pci_tbl, - .probe = dc390_probe_one, - .remove = dc390_remove_one, -}; - -static int __init dc390_module_init(void) -{ - if (!disable_clustering) { - printk(KERN_INFO "DC390: clustering now enabled by default. 
If you get problems load\n"); - printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n"); - } - - if (tmscsim[0] == -1 || tmscsim[0] > 15) { - tmscsim[0] = 7; - tmscsim[1] = 4; - tmscsim[2] = PARITY_CHK_ | TAG_QUEUEING_; - tmscsim[3] = MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION; - tmscsim[4] = 2; - tmscsim[5] = 10; - printk (KERN_INFO "DC390: Using safe settings.\n"); - } - - return pci_register_driver(&dc390_driver); -} - -static void __exit dc390_module_exit(void) -{ - pci_unregister_driver(&dc390_driver); -} - -module_init(dc390_module_init); -module_exit(dc390_module_exit); - -#ifndef MODULE -static int __init dc390_setup (char *str) -{ - int ints[8],i, im; - - get_options(str, ARRAY_SIZE(ints), ints); - im = ints[0]; - - if (im > 6) { - printk (KERN_NOTICE "DC390: ignore extra params!\n"); - im = 6; - } - - for (i = 0; i < im; i++) - tmscsim[i] = ints[i+1]; - /* dc390_checkparams (); */ - return 1; -} - -__setup("tmscsim=", dc390_setup); -#endif diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h deleted file mode 100644 index 3d1bb4ad1826..000000000000 --- a/drivers/scsi/tmscsim.h +++ /dev/null @@ -1,551 +0,0 @@ -/*********************************************************************** -;* File Name : TMSCSIM.H * -;* TEKRAM DC-390(T) PCI SCSI Bus Master Host Adapter * -;* Device Driver * -;***********************************************************************/ -/* $Id: tmscsim.h,v 2.15.2.3 2000/11/17 20:52:27 garloff Exp $ */ - -#ifndef _TMSCSIM_H -#define _TMSCSIM_H - -#include <linux/types.h> - -#define SCSI_IRQ_NONE 255 - -#define MAX_ADAPTER_NUM 4 -#define MAX_SG_LIST_BUF 16 /* Not used */ -#define MAX_SCSI_ID 8 -#define MAX_SRB_CNT 50 /* Max number of started commands */ - -#define SEL_TIMEOUT 153 /* 250 ms selection timeout (@ 40 MHz) */ - -/* -;----------------------------------------------------------------------- -; SCSI Request Block -;----------------------------------------------------------------------- -*/ -struct dc390_srb -{ -//u8 CmdBlock[12]; - -struct dc390_srb *pNextSRB; -struct dc390_dcb *pSRBDCB; -struct scsi_cmnd *pcmd; -struct scatterlist *pSegmentList; - -struct scatterlist Segmentx; /* make a one entry of S/G list table */ - -unsigned long SGBusAddr; /*;a segment starting address as seen by AM53C974A - in CPU endianness. We're only getting 32-bit bus - addresses by default */ -unsigned long SGToBeXferLen; /*; to be xfer length */ -unsigned long TotalXferredLen; -unsigned long SavedTotXLen; -unsigned long Saved_Ptr; -u32 SRBState; - -u8 SRBStatus; -u8 SRBFlag; /*; b0-AutoReqSense,b6-Read,b7-write */ - /*; b4-settimeout,b5-Residual valid */ -u8 AdaptStatus; -u8 TargetStatus; - -u8 ScsiPhase; -s8 TagNumber; -u8 SGIndex; -u8 SGcount; - -u8 MsgCnt; -u8 EndMessage; - -u8 MsgInBuf[6]; -u8 MsgOutBuf[6]; - -//u8 IORBFlag; /*;81h-Reset, 2-retry */ -}; - - -/* -;----------------------------------------------------------------------- -; Device Control Block -;----------------------------------------------------------------------- -*/ -struct dc390_dcb -{ -struct dc390_dcb *pNextDCB; -struct dc390_acb *pDCBACB; - -/* Queued SRBs */ -struct dc390_srb *pGoingSRB; -struct dc390_srb *pGoingLast; -struct dc390_srb *pActiveSRB; -u8 GoingSRBCnt; - -u32 TagMask; - -u8 TargetID; /*; SCSI Target ID (SCSI Only) */ -u8 TargetLUN; /*; SCSI Log. Unit (SCSI Only) */ -u8 DevMode; -u8 DCBFlag; - -u8 CtrlR1; -u8 CtrlR3; -u8 CtrlR4; - -u8 SyncMode; /*; 0:async mode */ -u8 NegoPeriod; /*;for nego. */ -u8 SyncPeriod; /*;for reg. 
*/ -u8 SyncOffset; /*;for reg. and nego.(low nibble) */ -}; - - -/* -;----------------------------------------------------------------------- -; Adapter Control Block -;----------------------------------------------------------------------- -*/ -struct dc390_acb -{ -struct Scsi_Host *pScsiHost; -u16 IOPortBase; -u8 IRQLevel; -u8 status; - -u8 SRBCount; -u8 AdapterIndex; /*; nth Adapter this driver */ -u8 DCBCnt; - -u8 TagMaxNum; -u8 ACBFlag; -u8 Gmode2; -u8 scan_devices; - -struct dc390_dcb *pLinkDCB; -struct dc390_dcb *pLastDCB; -struct dc390_dcb *pDCBRunRobin; - -struct dc390_dcb *pActiveDCB; -struct dc390_srb *pFreeSRB; -struct dc390_srb *pTmpSRB; - -u8 msgin123[4]; -u8 Connected; -u8 pad; - -#if defined(USE_SPINLOCKS) && USE_SPINLOCKS > 1 && (defined(CONFIG_SMP) || DEBUG_SPINLOCKS > 0) -spinlock_t lock; -#endif -u8 sel_timeout; -u8 glitch_cfg; - -u8 MsgLen; -u8 Ignore_IRQ; /* Not used */ - -struct pci_dev *pdev; - -unsigned long last_reset; -unsigned long Cmds; -u32 SelLost; -u32 SelConn; -u32 CmdInQ; -u32 CmdOutOfSRB; - -struct dc390_srb TmpSRB; -struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */ -}; - - -/*;-----------------------------------------------------------------------*/ - - -#define BIT31 0x80000000 -#define BIT30 0x40000000 -#define BIT29 0x20000000 -#define BIT28 0x10000000 -#define BIT27 0x08000000 -#define BIT26 0x04000000 -#define BIT25 0x02000000 -#define BIT24 0x01000000 -#define BIT23 0x00800000 -#define BIT22 0x00400000 -#define BIT21 0x00200000 -#define BIT20 0x00100000 -#define BIT19 0x00080000 -#define BIT18 0x00040000 -#define BIT17 0x00020000 -#define BIT16 0x00010000 -#define BIT15 0x00008000 -#define BIT14 0x00004000 -#define BIT13 0x00002000 -#define BIT12 0x00001000 -#define BIT11 0x00000800 -#define BIT10 0x00000400 -#define BIT9 0x00000200 -#define BIT8 0x00000100 -#define BIT7 0x00000080 -#define BIT6 0x00000040 -#define BIT5 0x00000020 -#define BIT4 0x00000010 -#define BIT3 0x00000008 -#define BIT2 0x00000004 -#define BIT1 0x00000002 -#define BIT0 0x00000001 - -/*;---UnitCtrlFlag */ -#define UNIT_ALLOCATED BIT0 -#define UNIT_INFO_CHANGED BIT1 -#define FORMATING_MEDIA BIT2 -#define UNIT_RETRY BIT3 - -/*;---UnitFlags */ -#define DASD_SUPPORT BIT0 -#define SCSI_SUPPORT BIT1 -#define ASPI_SUPPORT BIT2 - -/*;----SRBState machine definition */ -#define SRB_FREE 0 -#define SRB_WAIT BIT0 -#define SRB_READY BIT1 -#define SRB_MSGOUT BIT2 /*;arbitration+msg_out 1st byte*/ -#define SRB_MSGIN BIT3 -#define SRB_MSGIN_MULTI BIT4 -#define SRB_COMMAND BIT5 -#define SRB_START_ BIT6 /*;arbitration+msg_out+command_out*/ -#define SRB_DISCONNECT BIT7 -#define SRB_DATA_XFER BIT8 -#define SRB_XFERPAD BIT9 -#define SRB_STATUS BIT10 -#define SRB_COMPLETED BIT11 -#define SRB_ABORT_SENT BIT12 -#define DO_SYNC_NEGO BIT13 -#define SRB_UNEXPECT_RESEL BIT14 - -/*;---SRBstatus */ -#define SRB_OK BIT0 -#define ABORTION BIT1 -#define OVER_RUN BIT2 -#define UNDER_RUN BIT3 -#define PARITY_ERROR BIT4 -#define SRB_ERROR BIT5 - -/*;---ACBFlag */ -#define RESET_DEV BIT0 -#define RESET_DETECT BIT1 -#define RESET_DONE BIT2 - -/*;---DCBFlag */ -#define ABORT_DEV_ BIT0 - -/*;---SRBFlag */ -#define DATAOUT BIT7 -#define DATAIN BIT6 -#define RESIDUAL_VALID BIT5 -#define ENABLE_TIMER BIT4 -#define RESET_DEV0 BIT2 -#define ABORT_DEV BIT1 -#define AUTO_REQSENSE BIT0 - -/*;---Adapter status */ -#define H_STATUS_GOOD 0 -#define H_SEL_TIMEOUT 0x11 -#define H_OVER_UNDER_RUN 0x12 -#define H_UNEXP_BUS_FREE 0x13 -#define H_TARGET_PHASE_F 0x14 -#define H_INVALID_CCB_OP 0x16 -#define H_LINK_CCB_BAD 0x17 
-#define H_BAD_TARGET_DIR 0x18 -#define H_DUPLICATE_CCB 0x19 -#define H_BAD_CCB_OR_SG 0x1A -#define H_ABORT 0x0FF - -/* cmd->result */ -#define RES_TARGET 0x000000FF /* Target State */ -#define RES_TARGET_LNX STATUS_MASK /* Only official ... */ -#define RES_ENDMSG 0x0000FF00 /* End Message */ -#define RES_DID 0x00FF0000 /* DID_ codes */ -#define RES_DRV 0xFF000000 /* DRIVER_ codes */ - -#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)) -#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)) - -#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0) -#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0) -#define SET_RES_MSG(who, msg) do { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; } while (0) -#define SET_RES_DID(who, did) do { who &= ~RES_DID; who |= (int)(did) << 16; } while (0) -#define SET_RES_DRV(who, drv) do { who &= ~RES_DRV; who |= (int)(drv) << 24; } while (0) - -/*;---Sync_Mode */ -#define SYNC_DISABLE 0 -#define SYNC_ENABLE BIT0 -#define SYNC_NEGO_DONE BIT1 -#define WIDE_ENABLE BIT2 /* Not used ;-) */ -#define WIDE_NEGO_DONE BIT3 /* Not used ;-) */ -#define EN_TAG_QUEUEING BIT4 -#define EN_ATN_STOP BIT5 - -#define SYNC_NEGO_OFFSET 15 - -/*;---SCSI bus phase*/ -#define SCSI_DATA_OUT 0 -#define SCSI_DATA_IN 1 -#define SCSI_COMMAND 2 -#define SCSI_STATUS_ 3 -#define SCSI_NOP0 4 -#define SCSI_NOP1 5 -#define SCSI_MSG_OUT 6 -#define SCSI_MSG_IN 7 - -/*;----SCSI MSG BYTE*/ /* see scsi/scsi.h */ /* One is missing ! */ -#define ABORT_TAG 0x0d - -/* - * SISC query queue - */ -typedef struct { - dma_addr_t saved_dma_handle; -} dc390_cmd_scp_t; - -/* -;========================================================== -; EEPROM byte offset -;========================================================== -*/ -typedef struct _EEprom -{ -u8 EE_MODE1; -u8 EE_SPEED; -u8 xx1; -u8 xx2; -} EEprom, *PEEprom; - -#define REAL_EE_ADAPT_SCSI_ID 64 -#define REAL_EE_MODE2 65 -#define REAL_EE_DELAY 66 -#define REAL_EE_TAG_CMD_NUM 67 - -#define EE_ADAPT_SCSI_ID 32 -#define EE_MODE2 33 -#define EE_DELAY 34 -#define EE_TAG_CMD_NUM 35 - -#define EE_LEN 40 - -/*; EE_MODE1 bits definition*/ -#define PARITY_CHK_ BIT0 -#define SYNC_NEGO_ BIT1 -#define EN_DISCONNECT_ BIT2 -#define SEND_START_ BIT3 -#define TAG_QUEUEING_ BIT4 - -/*; EE_MODE2 bits definition*/ -#define MORE2_DRV BIT0 -#define GREATER_1G BIT1 -#define RST_SCSI_BUS BIT2 -#define ACTIVE_NEGATION BIT3 -#define NO_SEEK BIT4 -#define LUN_CHECK BIT5 - -#define ENABLE_CE 1 -#define DISABLE_CE 0 -#define EEPROM_READ 0x80 - -/* -;========================================================== -; AMD 53C974 Registers bit Definition -;========================================================== -*/ -/* -;==================== -; SCSI Register -;==================== -*/ - -/*; Command Reg.(+0CH) (rw) */ -#define DMA_COMMAND BIT7 -#define NOP_CMD 0 -#define CLEAR_FIFO_CMD 1 -#define RST_DEVICE_CMD 2 -#define RST_SCSI_BUS_CMD 3 - -#define INFO_XFER_CMD 0x10 -#define INITIATOR_CMD_CMPLTE 0x11 -#define MSG_ACCEPTED_CMD 0x12 -#define XFER_PAD_BYTE 0x18 -#define SET_ATN_CMD 0x1A -#define RESET_ATN_CMD 0x1B - -#define SEL_WO_ATN 0x41 /* currently not used */ -#define SEL_W_ATN 0x42 -#define SEL_W_ATN_STOP 0x43 -#define SEL_W_ATN3 0x46 -#define EN_SEL_RESEL 0x44 -#define DIS_SEL_RESEL 0x45 /* currently not used */ -#define RESEL 0x40 /* " */ -#define RESEL_ATN3 0x47 /* " */ - -#define DATA_XFER_CMD INFO_XFER_CMD - - -/*; 
SCSI Status Reg.(+10H) (r) */ -#define INTERRUPT BIT7 -#define ILLEGAL_OP_ERR BIT6 -#define PARITY_ERR BIT5 -#define COUNT_2_ZERO BIT4 -#define GROUP_CODE_VALID BIT3 -#define SCSI_PHASE_MASK (BIT2+BIT1+BIT0) -/* BIT2: MSG phase; BIT1: C/D physe; BIT0: I/O phase */ - -/*; Interrupt Status Reg.(+14H) (r) */ -#define SCSI_RESET BIT7 -#define INVALID_CMD BIT6 -#define DISCONNECTED BIT5 -#define SERVICE_REQUEST BIT4 -#define SUCCESSFUL_OP BIT3 -#define RESELECTED BIT2 -#define SEL_ATTENTION BIT1 -#define SELECTED BIT0 - -/*; Internal State Reg.(+18H) (r) */ -#define SYNC_OFFSET_FLAG BIT3 -#define INTRN_STATE_MASK (BIT2+BIT1+BIT0) -/* 0x04: Sel. successful (w/o stop), 0x01: Sel. successful (w/ stop) */ - -/*; Clock Factor Reg.(+24H) (w) */ -#define CLK_FREQ_40MHZ 0 -#define CLK_FREQ_35MHZ (BIT2+BIT1+BIT0) -#define CLK_FREQ_30MHZ (BIT2+BIT1) -#define CLK_FREQ_25MHZ (BIT2+BIT0) -#define CLK_FREQ_20MHZ BIT2 -#define CLK_FREQ_15MHZ (BIT1+BIT0) -#define CLK_FREQ_10MHZ BIT1 - -/*; Control Reg. 1(+20H) (rw) */ -#define EXTENDED_TIMING BIT7 -#define DIS_INT_ON_SCSI_RST BIT6 -#define PARITY_ERR_REPO BIT4 -#define SCSI_ID_ON_BUS (BIT2+BIT1+BIT0) /* host adapter ID */ - -/*; Control Reg. 2(+2CH) (rw) */ -#define EN_FEATURE BIT6 -#define EN_SCSI2_CMD BIT3 - -/*; Control Reg. 3(+30H) (rw) */ -#define ID_MSG_CHECK BIT7 -#define EN_QTAG_MSG BIT6 -#define EN_GRP2_CMD BIT5 -#define FAST_SCSI BIT4 /* ;10MB/SEC */ -#define FAST_CLK BIT3 /* ;25 - 40 MHZ */ - -/*; Control Reg. 4(+34H) (rw) */ -#define EATER_12NS 0 -#define EATER_25NS BIT7 -#define EATER_35NS BIT6 -#define EATER_0NS (BIT7+BIT6) -#define REDUCED_POWER BIT5 -#define CTRL4_RESERVED BIT4 /* must be 1 acc. to AM53C974.c */ -#define NEGATE_REQACKDATA BIT2 -#define NEGATE_REQACK BIT3 - -#define GLITCH_TO_NS(x) (((~x>>6 & 2) >> 1) | ((x>>6 & 1) << 1 ^ (x>>6 & 2))) -#define NS_TO_GLITCH(y) (((~y<<7) | ~((y<<6) ^ ((y<<5 & 1<<6) | ~0x40))) & 0xc0) - -/* -;==================== -; DMA Register -;==================== -*/ -/*; DMA Command Reg.(+40H) (rw) */ -#define READ_DIRECTION BIT7 -#define WRITE_DIRECTION 0 -#define EN_DMA_INT BIT6 -#define EN_PAGE_INT BIT5 /* page transfer interrupt enable */ -#define MAP_TO_MDL BIT4 -#define DIAGNOSTIC BIT2 -#define DMA_IDLE_CMD 0 -#define DMA_BLAST_CMD BIT0 -#define DMA_ABORT_CMD BIT1 -#define DMA_START_CMD (BIT1+BIT0) - -/*; DMA Status Reg.(+54H) (r) */ -#define PCI_MS_ABORT BIT6 -#define BLAST_COMPLETE BIT5 -#define SCSI_INTERRUPT BIT4 -#define DMA_XFER_DONE BIT3 -#define DMA_XFER_ABORT BIT2 -#define DMA_XFER_ERROR BIT1 -#define POWER_DOWN BIT0 - -/*; DMA SCSI Bus and Ctrl.(+70H) */ -#define EN_INT_ON_PCI_ABORT BIT25 -#define WRT_ERASE_DMA_STAT BIT24 -#define PW_DOWN_CTRL BIT21 -#define SCSI_BUSY BIT20 -#define SCLK BIT19 -#define SCAM BIT18 -#define SCSI_LINES 0x0003ffff - -/* -;========================================================== -; SCSI Chip register address offset -;========================================================== -;Registers are rw unless declared otherwise -*/ -#define CtcReg_Low 0x00 /* r curr. 
transfer count */ -#define CtcReg_Mid 0x04 /* r */ -#define CtcReg_High 0x38 /* r */ -#define ScsiFifo 0x08 -#define ScsiCmd 0x0C -#define Scsi_Status 0x10 /* r */ -#define INT_Status 0x14 /* r */ -#define Sync_Period 0x18 /* w */ -#define Sync_Offset 0x1C /* w */ -#define Clk_Factor 0x24 /* w */ -#define CtrlReg1 0x20 -#define CtrlReg2 0x2C -#define CtrlReg3 0x30 -#define CtrlReg4 0x34 -#define DMA_Cmd 0x40 -#define DMA_XferCnt 0x44 /* rw starting transfer count (32 bit) */ -#define DMA_XferAddr 0x48 /* rw starting physical address (32 bit) */ -#define DMA_Wk_ByteCntr 0x4C /* r working byte counter */ -#define DMA_Wk_AddrCntr 0x50 /* r working address counter */ -#define DMA_Status 0x54 /* r */ -#define DMA_MDL_Addr 0x58 /* rw starting MDL address */ -#define DMA_Wk_MDL_Cntr 0x5C /* r working MDL counter */ -#define DMA_ScsiBusCtrl 0x70 /* rw SCSI Bus, PCI/DMA Ctrl */ - -#define StcReg_Low CtcReg_Low /* w start transfer count */ -#define StcReg_Mid CtcReg_Mid /* w */ -#define StcReg_High CtcReg_High /* w */ -#define Scsi_Dest_ID Scsi_Status /* w */ -#define Scsi_TimeOut INT_Status /* w */ -#define Intern_State Sync_Period /* r */ -#define Current_Fifo Sync_Offset /* r Curr. FIFO / int. state */ - - -#define DC390_read8(address) \ - (inb (pACB->IOPortBase + (address))) - -#define DC390_read8_(address, base) \ - (inb ((u16)(base) + (address))) - -#define DC390_read16(address) \ - (inw (pACB->IOPortBase + (address))) - -#define DC390_read32(address) \ - (inl (pACB->IOPortBase + (address))) - -#define DC390_write8(address,value) \ - outb ((value), pACB->IOPortBase + (address)) - -#define DC390_write8_(address,value,base) \ - outb ((value), (u16)(base) + (address)) - -#define DC390_write16(address,value) \ - outw ((value), pACB->IOPortBase + (address)) - -#define DC390_write32(address,value) \ - outl ((value), pACB->IOPortBase + (address)) - - -#endif /* _TMSCSIM_H */ diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index d8dcf36aed11..14eb50b95a1e 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -696,25 +696,25 @@ static int u14_34f_slave_configure(struct scsi_device *dev) { if (TLDEV(dev->type) && dev->tagged_supported) if (tag_mode == TAG_SIMPLE) { - scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd); + scsi_change_queue_depth(dev, tqd); tag_suffix = ", simple tags"; } else if (tag_mode == TAG_ORDERED) { - scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd); + scsi_change_queue_depth(dev, tqd); tag_suffix = ", ordered tags"; } else { - scsi_adjust_queue_depth(dev, 0, tqd); + scsi_change_queue_depth(dev, tqd); tag_suffix = ", no tags"; } else if (TLDEV(dev->type) && linked_comm) { - scsi_adjust_queue_depth(dev, 0, tqd); + scsi_change_queue_depth(dev, tqd); tag_suffix = ", untagged"; } else { - scsi_adjust_queue_depth(dev, 0, utqd); + scsi_change_queue_depth(dev, utqd); tag_suffix = ""; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 605ca60e8a10..2e4614b9dddf 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2713,7 +2713,7 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev) dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", __func__, lun_qdepth); - scsi_activate_tcq(sdev, lun_qdepth); + scsi_change_queue_depth(sdev, lun_qdepth); } /* @@ -2783,11 +2783,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) struct ufs_hba *hba; hba = shost_priv(sdev->host); - sdev->tagged_supported = 1; /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ sdev->use_10_for_ms = 1; - 
scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); /* allow SCSI layer to restart the device in case of errors */ sdev->allow_restart = 1; @@ -2807,34 +2805,16 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) * ufshcd_change_queue_depth - change queue depth * @sdev: pointer to SCSI device * @depth: required depth to set - * @reason: reason for changing the depth * - * Change queue depth according to the reason and make sure - * the max. limits are not crossed. + * Change queue depth and make sure the max. limits are not crossed. */ -static int ufshcd_change_queue_depth(struct scsi_device *sdev, - int depth, int reason) +static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) { struct ufs_hba *hba = shost_priv(sdev->host); if (depth > hba->nutrs) depth = hba->nutrs; - - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - case SCSI_QDEPTH_RAMP_UP: - if (!sdev->tagged_supported) - depth = 1; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - case SCSI_QDEPTH_QFULL: - scsi_track_queue_full(sdev, depth); - break; - default: - return -EOPNOTSUPP; - } - - return depth; + return scsi_change_queue_depth(sdev, depth); } /** @@ -2860,7 +2840,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) struct ufs_hba *hba; hba = shost_priv(sdev->host); - scsi_deactivate_tcq(sdev, hba->nutrs); /* Drop the reference as it won't be needed anymore */ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { unsigned long flags; @@ -4239,6 +4218,8 @@ static struct scsi_host_template ufshcd_driver_template = { .cmd_per_lun = UFSHCD_CMD_PER_LUN, .can_queue = UFSHCD_CAN_QUEUE, .max_host_blocked = 1, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, @@ -4731,11 +4712,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, START_STOP_TIMEOUT, 0, NULL, REQ_PM); if (ret) { sdev_printk(KERN_WARNING, sdp, - "START_STOP failed for power mode: %d\n", pwr_mode); - scsi_show_result(ret); + "START_STOP failed for power mode: %d, result %x\n", + pwr_mode, ret); if (driver_byte(ret) & DRIVER_SENSE) { - scsi_show_sense_hdr(&sshdr); - scsi_show_extd_sense(sshdr.asc, sshdr.ascq); + scsi_show_sense_hdr(sdp, NULL, &sshdr); + scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq); } } diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index b83846fc7859..22e70126425b 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -561,6 +561,15 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh, return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); } +static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, + struct scsi_cmnd *sc) +{ + u32 tag = blk_mq_unique_tag(sc->request); + u16 hwq = blk_mq_unique_tag_to_hwq(tag); + + return &vscsi->req_vqs[hwq]; +} + static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, struct virtio_scsi_target_state *tgt) { @@ -604,7 +613,12 @@ static int virtscsi_queuecommand_multi(struct Scsi_Host *sh, struct virtio_scsi *vscsi = shost_priv(sh); struct virtio_scsi_target_state *tgt = scsi_target(sc->device)->hostdata; - struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt); + struct virtio_scsi_vq *req_vq; + + if (shost_use_blk_mq(sh)) + req_vq = virtscsi_pick_vq_mq(vscsi, sc); + else + req_vq = virtscsi_pick_vq(vscsi, tgt); return virtscsi_queuecommand(vscsi, req_vq, sc); } @@ -668,30 +682,13 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) * 
virtscsi_change_queue_depth() - Change a virtscsi target's queue depth * @sdev: Virtscsi target whose queue depth to change * @qdepth: New queue depth - * @reason: Reason for the queue depth change. */ -static int virtscsi_change_queue_depth(struct scsi_device *sdev, - int qdepth, - int reason) +static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth = shost->cmd_per_lun; - switch (reason) { - case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */ - scsi_track_queue_full(sdev, qdepth); - break; - case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */ - case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */ - scsi_adjust_queue_depth(sdev, - scsi_get_tag_type(sdev), - min(max_depth, qdepth)); - break; - default: - return -EOPNOTSUPP; - } - - return sdev->queue_depth; + return scsi_change_queue_depth(sdev, min(max_depth, qdepth)); } static int virtscsi_abort(struct scsi_cmnd *sc) @@ -758,6 +755,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .use_clustering = ENABLE_CLUSTERING, .target_alloc = virtscsi_target_alloc, .target_destroy = virtscsi_target_destroy, + .track_queue_depth = 1, }; static struct scsi_host_template virtscsi_host_template_multi = { @@ -776,6 +774,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .use_clustering = ENABLE_CLUSTERING, .target_alloc = virtscsi_target_alloc, .target_destroy = virtscsi_target_destroy, + .track_queue_depth = 1, }; #define virtscsi_config_get(vdev, fld) \ @@ -983,6 +982,7 @@ static int virtscsi_probe(struct virtio_device *vdev) shost->max_id = num_targets; shost->max_channel = 0; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + shost->nr_hw_queues = num_queues; if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 598f65efaaec..0f133c1817de 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -504,33 +504,11 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) } } -static int pvscsi_change_queue_depth(struct scsi_device *sdev, - int qdepth, - int reason) +static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) { - int max_depth; - struct Scsi_Host *shost = sdev->host; - - if (reason != SCSI_QDEPTH_DEFAULT) - /* - * We support only changing default. 
- */ - return -EOPNOTSUPP; - - max_depth = shost->can_queue; if (!sdev->tagged_supported) - max_depth = 1; - if (qdepth > max_depth) - qdepth = max_depth; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); - - if (sdev->inquiry_len > 7) - sdev_printk(KERN_INFO, sdev, - "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", - sdev->queue_depth, sdev->tagged_supported, - sdev->simple_tags, sdev->ordered_tags, - sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1); - return sdev->queue_depth; + qdepth = 1; + return scsi_change_queue_depth(sdev, qdepth); } /* @@ -723,10 +701,6 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, memcpy(e->cdb, cmd->cmnd, e->cdbLen); e->tag = SIMPLE_QUEUE_TAG; - if (sdev->tagged_supported && - (cmd->tag == HEAD_OF_QUEUE_TAG || - cmd->tag == ORDERED_QUEUE_TAG)) - e->tag = cmd->tag; if (cmd->sc_data_direction == DMA_FROM_DEVICE) e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index 32674236fec7..f94d73611ab4 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c @@ -1653,7 +1653,6 @@ static struct scsi_host_template driver_template = { .can_queue = WD7000_Q, .this_id = 7, .sg_tablesize = WD7000_SG, - .cmd_per_lun = 1, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c new file mode 100644 index 000000000000..7702664d7ed3 --- /dev/null +++ b/drivers/scsi/wd719x.c @@ -0,0 +1,996 @@ +/* + * Driver for Western Digital WD7193, WD7197 and WD7296 SCSI cards + * Copyright 2013 Ondrej Zary + * + * Original driver by + * Aaron Dewell <dewell@woods.net> + * Gaerti <Juergen.Gaertner@mbox.si.uni-hannover.de> + * + * HW documentation available in book: + * + * SPIDER Command Protocol + * by Chandru M. Sippy + * SCSI Storage Products (MCP) + * Western Digital Corporation + * 09-15-95 + * + * http://web.archive.org/web/20070717175254/http://sun1.rrzn.uni-hannover.de/gaertner.juergen/wd719x/Linux/Docu/Spider/ + */ + +/* + * Driver workflow: + * 1. SCSI command is transformed to SCB (Spider Control Block) by the + * queuecommand function. + * 2. The address of the SCB is stored in a list to be able to access it, if + * something goes wrong. + * 3. The address of the SCB is written to the Controller, which loads the SCB + * via BM-DMA and processes it. + * 4. After it has finished, it generates an interrupt, and sets registers. 
+ * + * flaws: + * - abort/reset functions + * + * ToDo: + * - tagged queueing + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/firmware.h> +#include <linux/eeprom_93cx6.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include "wd719x.h" + +/* low-level register access */ +static inline u8 wd719x_readb(struct wd719x *wd, u8 reg) +{ + return ioread8(wd->base + reg); +} + +static inline u32 wd719x_readl(struct wd719x *wd, u8 reg) +{ + return ioread32(wd->base + reg); +} + +static inline void wd719x_writeb(struct wd719x *wd, u8 reg, u8 val) +{ + iowrite8(val, wd->base + reg); +} + +static inline void wd719x_writew(struct wd719x *wd, u8 reg, u16 val) +{ + iowrite16(val, wd->base + reg); +} + +static inline void wd719x_writel(struct wd719x *wd, u8 reg, u32 val) +{ + iowrite32(val, wd->base + reg); +} + +/* wait until the command register is ready */ +static inline int wd719x_wait_ready(struct wd719x *wd) +{ + int i = 0; + + do { + if (wd719x_readb(wd, WD719X_AMR_COMMAND) == WD719X_CMD_READY) + return 0; + udelay(1); + } while (i++ < WD719X_WAIT_FOR_CMD_READY); + + dev_err(&wd->pdev->dev, "command register is not ready: 0x%02x\n", + wd719x_readb(wd, WD719X_AMR_COMMAND)); + + return -ETIMEDOUT; +} + +/* poll interrupt status register until command finishes */ +static inline int wd719x_wait_done(struct wd719x *wd, int timeout) +{ + u8 status; + + while (timeout > 0) { + status = wd719x_readb(wd, WD719X_AMR_INT_STATUS); + if (status) + break; + timeout--; + udelay(1); + } + + if (timeout <= 0) { + dev_err(&wd->pdev->dev, "direct command timed out\n"); + return -ETIMEDOUT; + } + + if (status != WD719X_INT_NOERRORS) { + dev_err(&wd->pdev->dev, "direct command failed, status 0x%02x, SUE 0x%02x\n", + status, wd719x_readb(wd, WD719X_AMR_SCB_ERROR)); + return -EIO; + } + + return 0; +} + +static int wd719x_direct_cmd(struct wd719x *wd, u8 opcode, u8 dev, u8 lun, + u8 tag, dma_addr_t data, int timeout) +{ + int ret = 0; + + /* clear interrupt status register (allow command register to clear) */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + + /* Wait for the Command register to become free */ + if (wd719x_wait_ready(wd)) + return -ETIMEDOUT; + + /* make sure we get NO interrupts */ + dev |= WD719X_DISABLE_INT; + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, dev); + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_2, lun); + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM_3, tag); + if (data) + wd719x_writel(wd, WD719X_AMR_SCB_IN, data); + + /* clear interrupt status register again */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + + /* Now, write the command */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, opcode); + + if (timeout) /* wait for the command to complete */ + ret = wd719x_wait_done(wd, timeout); + + /* clear interrupt status register (clean up) */ + if (opcode != WD719X_CMD_READ_FIRMVER) + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + + return ret; +} + +static void wd719x_destroy(struct wd719x *wd) +{ + struct wd719x_scb *scb; + + /* stop the RISC */ + if (wd719x_direct_cmd(wd, WD719X_CMD_SLEEP, 0, 0, 0, 0, + WD719X_WAIT_FOR_RISC)) + dev_warn(&wd->pdev->dev, "RISC sleep command failed\n"); + /* disable RISC */ + wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); + + /* free all SCBs */ + list_for_each_entry(scb, &wd->active_scbs, list) + pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb, + scb->phys); + list_for_each_entry(scb, &wd->free_scbs, 
list) + pci_free_consistent(wd->pdev, sizeof(struct wd719x_scb), scb, + scb->phys); + /* free internal buffers */ + pci_free_consistent(wd->pdev, wd->fw_size, wd->fw_virt, wd->fw_phys); + wd->fw_virt = NULL; + pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, + wd->hash_phys); + wd->hash_virt = NULL; + pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param), + wd->params, wd->params_phys); + wd->params = NULL; + free_irq(wd->pdev->irq, wd); +} + +/* finish a SCSI command, mark SCB (if any) as free, unmap buffers */ +static void wd719x_finish_cmd(struct scsi_cmnd *cmd, int result) +{ + struct wd719x *wd = shost_priv(cmd->device->host); + struct wd719x_scb *scb = (struct wd719x_scb *) cmd->host_scribble; + + if (scb) { + list_move(&scb->list, &wd->free_scbs); + dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + scsi_dma_unmap(cmd); + } + cmd->result = result << 16; + cmd->scsi_done(cmd); +} + +/* Build a SCB and send it to the card */ +static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) +{ + int i, count_sg; + unsigned long flags; + struct wd719x_scb *scb; + struct wd719x *wd = shost_priv(sh); + dma_addr_t phys; + + cmd->host_scribble = NULL; + + /* get a free SCB - either from existing ones or allocate a new one */ + spin_lock_irqsave(wd->sh->host_lock, flags); + scb = list_first_entry_or_null(&wd->free_scbs, struct wd719x_scb, list); + if (scb) { + list_del(&scb->list); + phys = scb->phys; + } else { + spin_unlock_irqrestore(wd->sh->host_lock, flags); + scb = pci_alloc_consistent(wd->pdev, sizeof(struct wd719x_scb), + &phys); + spin_lock_irqsave(wd->sh->host_lock, flags); + if (!scb) { + dev_err(&wd->pdev->dev, "unable to allocate SCB\n"); + wd719x_finish_cmd(cmd, DID_ERROR); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + return 0; + } + } + memset(scb, 0, sizeof(struct wd719x_scb)); + list_add(&scb->list, &wd->active_scbs); + + scb->phys = phys; + scb->cmd = cmd; + cmd->host_scribble = (char *) scb; + + scb->CDB_tag = 0; /* Tagged queueing not supported yet */ + scb->devid = cmd->device->id; + scb->lun = cmd->device->lun; + + /* copy the command */ + memcpy(scb->CDB, cmd->cmnd, cmd->cmd_len); + + /* map sense buffer */ + scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE; + cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); + scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle); + + /* request autosense */ + scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE; + + /* check direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) + scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION + | WD719X_SCB_FLAGS_PCI_TO_SCSI; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + scb->SCB_options |= WD719X_SCB_FLAGS_CHECK_DIRECTION; + + /* Scatter/gather */ + count_sg = scsi_dma_map(cmd); + if (count_sg < 0) { + wd719x_finish_cmd(cmd, DID_ERROR); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + return 0; + } + BUG_ON(count_sg > WD719X_SG); + + if (count_sg) { + struct scatterlist *sg; + + scb->data_length = cpu_to_le32(count_sg * + sizeof(struct wd719x_sglist)); + scb->data_p = cpu_to_le32(scb->phys + + offsetof(struct wd719x_scb, sg_list)); + + scsi_for_each_sg(cmd, sg, count_sg, i) { + scb->sg_list[i].ptr = cpu_to_le32(sg_dma_address(sg)); + scb->sg_list[i].length = cpu_to_le32(sg_dma_len(sg)); + } + scb->SCB_options |= WD719X_SCB_FLAGS_DO_SCATTER_GATHER; + } else { /* zero length */ + scb->data_length = 0; + scb->data_p = 0;
+ } + + /* check if the Command register is free */ + if (wd719x_readb(wd, WD719X_AMR_COMMAND) != WD719X_CMD_READY) { + spin_unlock_irqrestore(wd->sh->host_lock, flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + /* write pointer to the AMR */ + wd719x_writel(wd, WD719X_AMR_SCB_IN, scb->phys); + /* send SCB opcode */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_PROCESS_SCB); + + spin_unlock_irqrestore(wd->sh->host_lock, flags); + + return 0; +} + +static int wd719x_chip_init(struct wd719x *wd) +{ + int i, ret; + u32 risc_init[3]; + const struct firmware *fw_wcs, *fw_risc; + const char fwname_wcs[] = "wd719x-wcs.bin"; + const char fwname_risc[] = "wd719x-risc.bin"; + + memset(wd->hash_virt, 0, WD719X_HASH_TABLE_SIZE); + + /* WCS (sequencer) firmware */ + ret = request_firmware(&fw_wcs, fwname_wcs, &wd->pdev->dev); + if (ret) { + dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n", + fwname_wcs, ret); + return ret; + } + /* RISC firmware */ + ret = request_firmware(&fw_risc, fwname_risc, &wd->pdev->dev); + if (ret) { + dev_err(&wd->pdev->dev, "Unable to load firmware %s: %d\n", + fwname_risc, ret); + release_firmware(fw_wcs); + return ret; + } + wd->fw_size = ALIGN(fw_wcs->size, 4) + fw_risc->size; + + if (!wd->fw_virt) + wd->fw_virt = pci_alloc_consistent(wd->pdev, wd->fw_size, + &wd->fw_phys); + if (!wd->fw_virt) { + ret = -ENOMEM; + goto wd719x_init_end; + } + + /* make a fresh copy of WCS and RISC code */ + memcpy(wd->fw_virt, fw_wcs->data, fw_wcs->size); + memcpy(wd->fw_virt + ALIGN(fw_wcs->size, 4), fw_risc->data, + fw_risc->size); + + /* Reset the Spider Chip and adapter itself */ + wd719x_writeb(wd, WD719X_PCI_PORT_RESET, WD719X_PCI_RESET); + udelay(WD719X_WAIT_FOR_RISC); + /* Clear PIO mode bits set by BIOS */ + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, 0); + /* ensure RISC is not running */ + wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, 0); + /* ensure command port is ready */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, 0); + if (wd719x_wait_ready(wd)) { + ret = -ETIMEDOUT; + goto wd719x_init_end; + } + + /* Transfer the first 2K words of RISC code to kick start the uP */ + risc_init[0] = wd->fw_phys; /* WCS FW */ + risc_init[1] = wd->fw_phys + ALIGN(fw_wcs->size, 4); /* RISC FW */ + risc_init[2] = wd->hash_phys; /* hash table */ + + /* clear DMA status */ + wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3STATUS, 0); + + /* address to read firmware from */ + wd719x_writel(wd, WD719X_PCI_EXTERNAL_ADDR, risc_init[1]); + /* base address to write firmware to (on card) */ + wd719x_writew(wd, WD719X_PCI_INTERNAL_ADDR, WD719X_PRAM_BASE_ADDR); + /* size: first 2K words */ + wd719x_writew(wd, WD719X_PCI_DMA_TRANSFER_SIZE, 2048 * 2); + /* start DMA */ + wd719x_writeb(wd, WD719X_PCI_CHANNEL2_3CMD, WD719X_START_CHANNEL2_3DMA); + + /* wait for DMA to complete */ + i = WD719X_WAIT_FOR_RISC; + while (i-- > 0) { + u8 status = wd719x_readb(wd, WD719X_PCI_CHANNEL2_3STATUS); + if (status == WD719X_START_CHANNEL2_3DONE) + break; + if (status == WD719X_START_CHANNEL2_3ABORT) { + dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA aborted\n"); + ret = -EIO; + goto wd719x_init_end; + } + udelay(1); + } + if (i < 1) { + dev_warn(&wd->pdev->dev, "RISC bootstrap failed: DMA timeout\n"); + ret = -ETIMEDOUT; + goto wd719x_init_end; + } + + /* firmware is loaded, now initialize and wake up the RISC */ + /* write RISC initialization long words to Spider */ + wd719x_writel(wd, WD719X_AMR_SCB_IN, risc_init[0]); + wd719x_writel(wd, WD719X_AMR_SCB_IN + 4, risc_init[1]); + wd719x_writel(wd, WD719X_AMR_SCB_IN + 8, 
risc_init[2]); + + /* disable interrupts during initialization of RISC */ + wd719x_writeb(wd, WD719X_AMR_CMD_PARAM, WD719X_DISABLE_INT); + + /* issue INITIALIZE RISC command */ + wd719x_writeb(wd, WD719X_AMR_COMMAND, WD719X_CMD_INIT_RISC); + /* enable advanced mode (wake up RISC) */ + wd719x_writeb(wd, WD719X_PCI_MODE_SELECT, WD719X_ENABLE_ADVANCE_MODE); + udelay(WD719X_WAIT_FOR_RISC); + + ret = wd719x_wait_done(wd, WD719X_WAIT_FOR_RISC); + /* clear interrupt status register */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + if (ret) { + dev_warn(&wd->pdev->dev, "Unable to initialize RISC\n"); + goto wd719x_init_end; + } + /* RISC is up and running */ + + /* Read FW version from RISC */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_READ_FIRMVER, 0, 0, 0, 0, + WD719X_WAIT_FOR_RISC); + if (ret) { + dev_warn(&wd->pdev->dev, "Unable to read firmware version\n"); + goto wd719x_init_end; + } + dev_info(&wd->pdev->dev, "RISC initialized with firmware version %.2x.%.2x\n", + wd719x_readb(wd, WD719X_AMR_SCB_OUT + 1), + wd719x_readb(wd, WD719X_AMR_SCB_OUT)); + + /* RESET SCSI bus */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_BUSRESET, 0, 0, 0, 0, + WD719X_WAIT_FOR_SCSI_RESET); + if (ret) { + dev_warn(&wd->pdev->dev, "SCSI bus reset failed\n"); + goto wd719x_init_end; + } + + /* use HostParameter structure to set Spider's Host Parameter Block */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_SET_PARAM, 0, + sizeof(struct wd719x_host_param), 0, + wd->params_phys, WD719X_WAIT_FOR_RISC); + if (ret) { + dev_warn(&wd->pdev->dev, "Failed to set HOST PARAMETERS\n"); + goto wd719x_init_end; + } + + /* initiate SCAM (does nothing if disabled in BIOS) */ + /* bug?: we should pass a mask of static IDs which we don't have */ + ret = wd719x_direct_cmd(wd, WD719X_CMD_INIT_SCAM, 0, 0, 0, 0, + WD719X_WAIT_FOR_SCSI_RESET); + if (ret) { + dev_warn(&wd->pdev->dev, "SCAM initialization failed\n"); + goto wd719x_init_end; + } + + /* clear AMR_BIOS_SHARE_INT register */ + wd719x_writeb(wd, WD719X_AMR_BIOS_SHARE_INT, 0); + +wd719x_init_end: + release_firmware(fw_wcs); + release_firmware(fw_risc); + + return ret; +} + +static int wd719x_abort(struct scsi_cmnd *cmd) +{ + int action, result; + unsigned long flags; + struct wd719x_scb *scb = (struct wd719x_scb *)cmd->host_scribble; + struct wd719x *wd = shost_priv(cmd->device->host); + + dev_info(&wd->pdev->dev, "abort command, tag: %x\n", cmd->tag); + + action = /*cmd->tag ? WD719X_CMD_ABORT_TAG : */WD719X_CMD_ABORT; + + spin_lock_irqsave(wd->sh->host_lock, flags); + result = wd719x_direct_cmd(wd, action, cmd->device->id, + cmd->device->lun, cmd->tag, scb->phys, 0); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + if (result) + return FAILED; + + return SUCCESS; +} + +static int wd719x_reset(struct scsi_cmnd *cmd, u8 opcode, u8 device) +{ + int result; + unsigned long flags; + struct wd719x *wd = shost_priv(cmd->device->host); + + dev_info(&wd->pdev->dev, "%s reset requested\n", + (opcode == WD719X_CMD_BUSRESET) ?
"bus" : "device"); + + spin_lock_irqsave(wd->sh->host_lock, flags); + result = wd719x_direct_cmd(wd, opcode, device, 0, 0, 0, + WD719X_WAIT_FOR_SCSI_RESET); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + if (result) + return FAILED; + + return SUCCESS; +} + +static int wd719x_dev_reset(struct scsi_cmnd *cmd) +{ + return wd719x_reset(cmd, WD719X_CMD_RESET, cmd->device->id); +} + +static int wd719x_bus_reset(struct scsi_cmnd *cmd) +{ + return wd719x_reset(cmd, WD719X_CMD_BUSRESET, 0); +} + +static int wd719x_host_reset(struct scsi_cmnd *cmd) +{ + struct wd719x *wd = shost_priv(cmd->device->host); + struct wd719x_scb *scb, *tmp; + unsigned long flags; + int result; + + dev_info(&wd->pdev->dev, "host reset requested\n"); + spin_lock_irqsave(wd->sh->host_lock, flags); + /* Try to reinit the RISC */ + if (wd719x_chip_init(wd) == 0) + result = SUCCESS; + else + result = FAILED; + + /* flush all SCBs */ + list_for_each_entry_safe(scb, tmp, &wd->active_scbs, list) { + struct scsi_cmnd *tmp_cmd = scb->cmd; + wd719x_finish_cmd(tmp_cmd, result); + } + spin_unlock_irqrestore(wd->sh->host_lock, flags); + + return result; +} + +static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int geom[]) +{ + if (capacity >= 0x200000) { + geom[0] = 255; /* heads */ + geom[1] = 63; /* sectors */ + } else { + geom[0] = 64; /* heads */ + geom[1] = 32; /* sectors */ + } + geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ + + return 0; +} + +/* process a SCB-completion interrupt */ +static inline void wd719x_interrupt_SCB(struct wd719x *wd, + union wd719x_regs regs, + struct wd719x_scb *scb) +{ + struct scsi_cmnd *cmd; + int result; + + /* now have to find result from card */ + switch (regs.bytes.SUE) { + case WD719X_SUE_NOERRORS: + result = DID_OK; + break; + case WD719X_SUE_REJECTED: + dev_err(&wd->pdev->dev, "command rejected\n"); + result = DID_ERROR; + break; + case WD719X_SUE_SCBQFULL: + dev_err(&wd->pdev->dev, "SCB queue is full\n"); + result = DID_ERROR; + break; + case WD719X_SUE_TERM: + dev_dbg(&wd->pdev->dev, "SCB terminated by direct command\n"); + result = DID_ABORT; /* or DID_RESET? 
*/ + case WD719X_SUE_CHAN1ABORT: + case WD719X_SUE_CHAN23ABORT: + result = DID_ABORT; + dev_err(&wd->pdev->dev, "DMA abort\n"); + break; + case WD719X_SUE_CHAN1PAR: + case WD719X_SUE_CHAN23PAR: + result = DID_PARITY; + dev_err(&wd->pdev->dev, "DMA parity error\n"); + break; + case WD719X_SUE_TIMEOUT: + result = DID_TIME_OUT; + dev_dbg(&wd->pdev->dev, "selection timeout\n"); + break; + case WD719X_SUE_RESET: + dev_dbg(&wd->pdev->dev, "bus reset occurred\n"); + result = DID_RESET; + break; + case WD719X_SUE_BUSERROR: + dev_dbg(&wd->pdev->dev, "SCSI bus error\n"); + result = DID_ERROR; + break; + case WD719X_SUE_WRONGWAY: + dev_err(&wd->pdev->dev, "wrong data transfer direction\n"); + result = DID_ERROR; + break; + case WD719X_SUE_BADPHASE: + dev_err(&wd->pdev->dev, "invalid SCSI phase\n"); + result = DID_ERROR; + break; + case WD719X_SUE_TOOLONG: + dev_err(&wd->pdev->dev, "record too long\n"); + result = DID_ERROR; + break; + case WD719X_SUE_BUSFREE: + dev_err(&wd->pdev->dev, "unexpected bus free\n"); + result = DID_NO_CONNECT; /* or DID_ERROR ???*/ + break; + case WD719X_SUE_ARSDONE: + dev_dbg(&wd->pdev->dev, "auto request sense\n"); + if (regs.bytes.SCSI == 0) + result = DID_OK; + else + result = DID_PARITY; + break; + case WD719X_SUE_IGNORED: + dev_err(&wd->pdev->dev, "target id %d ignored command\n", + scb->cmd->device->id); + result = DID_NO_CONNECT; + break; + case WD719X_SUE_WRONGTAGS: + dev_err(&wd->pdev->dev, "reversed tags\n"); + result = DID_ERROR; + break; + case WD719X_SUE_BADTAGS: + dev_err(&wd->pdev->dev, "tag type not supported by target\n"); + result = DID_ERROR; + break; + case WD719X_SUE_NOSCAMID: + dev_err(&wd->pdev->dev, "no SCAM soft ID available\n"); + result = DID_ERROR; + break; + default: + dev_warn(&wd->pdev->dev, "unknown SUE error code: 0x%x\n", + regs.bytes.SUE); + result = DID_ERROR; + break; + } + cmd = scb->cmd; + + wd719x_finish_cmd(cmd, result); +} + +static irqreturn_t wd719x_interrupt(int irq, void *dev_id) +{ + struct wd719x *wd = dev_id; + union wd719x_regs regs; + unsigned long flags; + u32 SCB_out; + + spin_lock_irqsave(wd->sh->host_lock, flags); + /* read SCB pointer back from card */ + SCB_out = wd719x_readl(wd, WD719X_AMR_SCB_OUT); + /* read all status info at once */ + regs.all = cpu_to_le32(wd719x_readl(wd, WD719X_AMR_OP_CODE)); + + switch (regs.bytes.INT) { + case WD719X_INT_NONE: + spin_unlock_irqrestore(wd->sh->host_lock, flags); + return IRQ_NONE; + case WD719X_INT_LINKNOSTATUS: + dev_err(&wd->pdev->dev, "linked command completed with no status\n"); + break; + case WD719X_INT_BADINT: + dev_err(&wd->pdev->dev, "unsolicited interrupt\n"); + break; + case WD719X_INT_NOERRORS: + case WD719X_INT_LINKNOERRORS: + case WD719X_INT_ERRORSLOGGED: + case WD719X_INT_SPIDERFAILED: + /* was the cmd completed a direct or SCB command?
*/ + if (regs.bytes.OPC == WD719X_CMD_PROCESS_SCB) { + struct wd719x_scb *scb; + list_for_each_entry(scb, &wd->active_scbs, list) + if (SCB_out == scb->phys) + break; + if (SCB_out == scb->phys) + wd719x_interrupt_SCB(wd, regs, scb); + else + dev_err(&wd->pdev->dev, "card returned invalid SCB pointer\n"); + } else + dev_warn(&wd->pdev->dev, "direct command 0x%x completed\n", + regs.bytes.OPC); + break; + case WD719X_INT_PIOREADY: + dev_err(&wd->pdev->dev, "card indicates PIO data ready but we never use PIO\n"); + /* interrupt will not be cleared until all data is read */ + break; + default: + dev_err(&wd->pdev->dev, "unknown interrupt reason: %d\n", + regs.bytes.INT); + + } + /* clear interrupt so another can happen */ + wd719x_writeb(wd, WD719X_AMR_INT_STATUS, WD719X_INT_NONE); + spin_unlock_irqrestore(wd->sh->host_lock, flags); + + return IRQ_HANDLED; +} + +static void wd719x_eeprom_reg_read(struct eeprom_93cx6 *eeprom) +{ + struct wd719x *wd = eeprom->data; + u8 reg = wd719x_readb(wd, WD719X_PCI_GPIO_DATA); + + eeprom->reg_data_out = reg & WD719X_EE_DO; +} + +static void wd719x_eeprom_reg_write(struct eeprom_93cx6 *eeprom) +{ + struct wd719x *wd = eeprom->data; + u8 reg = 0; + + if (eeprom->reg_data_in) + reg |= WD719X_EE_DI; + if (eeprom->reg_data_clock) + reg |= WD719X_EE_CLK; + if (eeprom->reg_chip_select) + reg |= WD719X_EE_CS; + + wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, reg); +} + +/* read config from EEPROM so it can be downloaded by the RISC on (re-)init */ +static void wd719x_read_eeprom(struct wd719x *wd) +{ + struct eeprom_93cx6 eeprom; + u8 gpio; + struct wd719x_eeprom_header header; + + eeprom.data = wd; + eeprom.register_read = wd719x_eeprom_reg_read; + eeprom.register_write = wd719x_eeprom_reg_write; + eeprom.width = PCI_EEPROM_WIDTH_93C46; + + /* set all outputs to low */ + wd719x_writeb(wd, WD719X_PCI_GPIO_DATA, 0); + /* configure GPIO pins */ + gpio = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL); + /* GPIO outputs */ + gpio &= (~(WD719X_EE_CLK | WD719X_EE_DI | WD719X_EE_CS)); + /* GPIO input */ + gpio |= WD719X_EE_DO; + wd719x_writeb(wd, WD719X_PCI_GPIO_CONTROL, gpio); + + /* read EEPROM header */ + eeprom_93cx6_multireadb(&eeprom, 0, (u8 *)&header, sizeof(header)); + + if (header.sig1 == 'W' && header.sig2 == 'D') + eeprom_93cx6_multireadb(&eeprom, header.cfg_offset, + (u8 *)wd->params, + sizeof(struct wd719x_host_param)); + else { /* default EEPROM values */ + dev_warn(&wd->pdev->dev, "EEPROM signature is invalid (0x%02x 0x%02x), using default values\n", + header.sig1, header.sig2); + wd->params->ch_1_th = 0x10; /* 16 DWs = 64 B */ + wd->params->scsi_conf = 0x4c; /* 48ma, spue, parity check */ + wd->params->own_scsi_id = 0x07; /* ID 7, SCAM disabled */ + wd->params->sel_timeout = 0x4d; /* 250 ms */ + wd->params->sleep_timer = 0x01; + wd->params->cdb_size = cpu_to_le16(0x5555); /* all 6 B */ + wd->params->scsi_pad = 0x1b; + if (wd->type == WD719X_TYPE_7193) /* narrow card - disable */ + wd->params->wide = cpu_to_le32(0x00000000); + else /* initiate & respond to WIDE messages */ + wd->params->wide = cpu_to_le32(0xffffffff); + wd->params->sync = cpu_to_le32(0xffffffff); + wd->params->soft_mask = 0x00; /* all disabled */ + wd->params->unsol_mask = 0x00; /* all disabled */ + } + /* disable TAGGED messages */ + wd->params->tag_en = cpu_to_le16(0x0000); +} + +/* Read card type from GPIO bits 1 and 3 */ +static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd) +{ + u8 card = wd719x_readb(wd, WD719X_PCI_GPIO_CONTROL); + + card |= WD719X_GPIO_ID_BITS; + wd719x_writeb(wd, 
WD719X_PCI_GPIO_CONTROL, card); + card = wd719x_readb(wd, WD719X_PCI_GPIO_DATA) & WD719X_GPIO_ID_BITS; + switch (card) { + case 0x08: + return WD719X_TYPE_7193; + case 0x02: + return WD719X_TYPE_7197; + case 0x00: + return WD719X_TYPE_7296; + default: + dev_warn(&wd->pdev->dev, "unknown card type 0x%x\n", card); + return WD719X_TYPE_UNKNOWN; + } +} + +static int wd719x_board_found(struct Scsi_Host *sh) +{ + struct wd719x *wd = shost_priv(sh); + char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" }; + int ret; + + INIT_LIST_HEAD(&wd->active_scbs); + INIT_LIST_HEAD(&wd->free_scbs); + + sh->base = pci_resource_start(wd->pdev, 0); + + wd->type = wd719x_detect_type(wd); + + wd->sh = sh; + sh->irq = wd->pdev->irq; + wd->fw_virt = NULL; + + /* memory area for host (EEPROM) parameters */ + wd->params = pci_alloc_consistent(wd->pdev, + sizeof(struct wd719x_host_param), + &wd->params_phys); + if (!wd->params) { + dev_warn(&wd->pdev->dev, "unable to allocate parameter buffer\n"); + return -ENOMEM; + } + + /* memory area for the RISC for hash table of outstanding requests */ + wd->hash_virt = pci_alloc_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, + &wd->hash_phys); + if (!wd->hash_virt) { + dev_warn(&wd->pdev->dev, "unable to allocate hash buffer\n"); + ret = -ENOMEM; + goto fail_free_params; + } + + ret = request_irq(wd->pdev->irq, wd719x_interrupt, IRQF_SHARED, + "wd719x", wd); + if (ret) { + dev_warn(&wd->pdev->dev, "unable to assign IRQ %d\n", + wd->pdev->irq); + goto fail_free_hash; + } + + /* read parameters from EEPROM */ + wd719x_read_eeprom(wd); + + ret = wd719x_chip_init(wd); + if (ret) + goto fail_free_irq; + + sh->this_id = wd->params->own_scsi_id & WD719X_EE_SCSI_ID_MASK; + + dev_info(&wd->pdev->dev, "%s at I/O 0x%lx, IRQ %u, SCSI ID %d\n", + card_types[wd->type], sh->base, sh->irq, sh->this_id); + + return 0; + +fail_free_irq: + free_irq(wd->pdev->irq, wd); +fail_free_hash: + pci_free_consistent(wd->pdev, WD719X_HASH_TABLE_SIZE, wd->hash_virt, + wd->hash_phys); +fail_free_params: + pci_free_consistent(wd->pdev, sizeof(struct wd719x_host_param), + wd->params, wd->params_phys); + + return ret; +} + +static struct scsi_host_template wd719x_template = { + .name = "Western Digital 719x", + .queuecommand = wd719x_queuecommand, + .eh_abort_handler = wd719x_abort, + .eh_device_reset_handler = wd719x_dev_reset, + .eh_bus_reset_handler = wd719x_bus_reset, + .eh_host_reset_handler = wd719x_host_reset, + .bios_param = wd719x_biosparam, + .proc_name = "wd719x", + .can_queue = 255, + .this_id = 7, + .sg_tablesize = WD719X_SG, + .cmd_per_lun = WD719X_CMD_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, +}; + +static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d) +{ + int err; + struct Scsi_Host *sh; + struct wd719x *wd; + + err = pci_enable_device(pdev); + if (err) + goto fail; + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n"); + goto disable_device; + } + + err = pci_request_regions(pdev, "wd719x"); + if (err) + goto disable_device; + pci_set_master(pdev); + + err = -ENODEV; + if (pci_resource_len(pdev, 0) == 0) + goto release_region; + + err = -ENOMEM; + sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x)); + if (!sh) + goto release_region; + + wd = shost_priv(sh); + wd->base = pci_iomap(pdev, 0, 0); + if (!wd->base) + goto free_host; + wd->pdev = pdev; + + err = wd719x_board_found(sh); + if (err) + goto unmap; + + err = scsi_add_host(sh, &wd->pdev->dev); + if (err) + goto destroy; + + 
scsi_scan_host(sh); + + pci_set_drvdata(pdev, sh); + return 0; + +destroy: + wd719x_destroy(wd); +unmap: + pci_iounmap(pdev, wd->base); +free_host: + scsi_host_put(sh); +release_region: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); +fail: + return err; +} + + +static void wd719x_pci_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *sh = pci_get_drvdata(pdev); + struct wd719x *wd = shost_priv(sh); + + scsi_remove_host(sh); + wd719x_destroy(wd); + pci_iounmap(pdev, wd->base); + pci_release_regions(pdev); + pci_disable_device(pdev); + + scsi_host_put(sh); +} + +static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, wd719x_pci_table); + +static struct pci_driver wd719x_pci_driver = { + .name = "wd719x", + .id_table = wd719x_pci_table, + .probe = wd719x_pci_probe, + .remove = wd719x_pci_remove, +}; + +static int __init wd719x_init(void) +{ + return pci_register_driver(&wd719x_pci_driver); +} + +static void __exit wd719x_exit(void) +{ + pci_unregister_driver(&wd719x_pci_driver); +} + +module_init(wd719x_init); +module_exit(wd719x_exit); + +MODULE_DESCRIPTION("Western Digital WD7193/7197/7296 SCSI driver"); +MODULE_AUTHOR("Ondrej Zary, Aaron Dewell, Juergen Gaertner"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("wd719x-wcs.bin"); +MODULE_FIRMWARE("wd719x-risc.bin"); diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h new file mode 100644 index 000000000000..185e30e4eb93 --- /dev/null +++ b/drivers/scsi/wd719x.h @@ -0,0 +1,249 @@ +#ifndef _WD719X_H_ +#define _WD719X_H_ + +#define WD719X_SG 255 /* Scatter/gather size */ +#define WD719X_CMD_PER_LUN 1 /* We should be able to do linked commands, but + * this is 1 for now to be safe. */ + +struct wd719x_sglist { + __le32 ptr; + __le32 length; +} __packed; + +enum wd719x_card_type { + WD719X_TYPE_UNKNOWN = 0, + WD719X_TYPE_7193, + WD719X_TYPE_7197, + WD719X_TYPE_7296, +}; + +union wd719x_regs { + __le32 all; /* All Status at once */ + struct { + u8 OPC; /* Opcode register */ + u8 SCSI; /* SCSI Errors */ + u8 SUE; /* Spider unique Errors */ + u8 INT; /* Interrupt Status */ + } bytes; +}; + +/* Spider Command Block (SCB) */ +struct wd719x_scb { + __le32 Int_SCB; /* 00-03 Internal SCB link pointer (must be cleared) */ + u8 SCB_opcode; /* 04 SCB Command opcode */ + u8 CDB_tag; /* 05 SCSI Tag byte for CDB queues (0 if untagged) */ + u8 lun; /* 06 SCSI LUN */ + u8 devid; /* 07 SCSI Device ID */ + u8 CDB[16]; /* 08-23 SCSI CDB (16 bytes as defined by ANSI spec. 
*/ + __le32 data_p; /* 24-27 Data transfer address (or SG list address) */ + __le32 data_length; /* 28-31 Data transfer Length (or SG list length) */ + __le32 CDB_link; /* 32-35 SCSI CDB Link Ptr */ + __le32 sense_buf; /* 36-39 Auto request sense buffer address */ + u8 sense_buf_length;/* 40 Auto request sense transfer length */ + u8 reserved; /* 41 reserved */ + u8 SCB_options; /* 42 SCB-options */ + u8 SCB_tag_msg; /* 43 Tagged messages options */ + /* Not filled in by host */ + __le32 req_ptr; /* 44-47 Ptr to Host Request returned on interrupt */ + u8 host_opcode; /* 48 Host Command Opcode (same as AMR_00) */ + u8 scsi_stat; /* 49 SCSI Status returned */ + u8 ret_error; /* 50 SPIDER Unique Error Code returned (SUE) */ + u8 int_stat; /* 51 Message u8 / Interrupt Status byte returned */ + __le32 transferred; /* 52-55 Bytes Transferred */ + u8 last_trans[3]; /* 56-58 Bytes Transferred in last session */ + u8 length; /* 59 SCSI Messages Length (1-8) */ + u8 sync_offset; /* 60 Synchronous offset */ + u8 sync_rate; /* 61 Synchronous rate */ + u8 flags[2]; /* 62-63 SCB specific flags (local to each thread) */ + /* everything below is for driver use (not used by card) */ + dma_addr_t phys; /* bus address of the SCB */ + struct scsi_cmnd *cmd; /* a copy of the pointer we were passed */ + struct list_head list; + struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */ +} __packed; + +struct wd719x { + struct Scsi_Host *sh; /* pointer to host structure */ + struct pci_dev *pdev; + void __iomem *base; + enum wd719x_card_type type; /* type of card */ + void *fw_virt; /* firmware buffer CPU address */ + dma_addr_t fw_phys; /* firmware buffer bus address */ + size_t fw_size; /* firmware buffer size */ + struct wd719x_host_param *params; /* host parameters (EEPROM) */ + dma_addr_t params_phys; /* host parameters bus address */ + void *hash_virt; /* hash table CPU address */ + dma_addr_t hash_phys; /* hash table bus address */ + struct list_head active_scbs; + struct list_head free_scbs; +}; + +/* timeout delays in microsecs */ +#define WD719X_WAIT_FOR_CMD_READY 500 +#define WD719X_WAIT_FOR_RISC 2000 +#define WD719X_WAIT_FOR_SCSI_RESET 3000000 + +/* All commands except 0x00 generate an interrupt */ +#define WD719X_CMD_READY 0x00 /* Command register ready (or noop) */ +#define WD719X_CMD_INIT_RISC 0x01 /* Initialize RISC */ +/* 0x02 is reserved */ +#define WD719X_CMD_BUSRESET 0x03 /* Assert SCSI bus reset */ +#define WD719X_CMD_READ_FIRMVER 0x04 /* Read the Firmware Revision */ +#define WD719X_CMD_ECHO_BYTES 0x05 /* Echo command bytes (DW) */ +/* 0x06 is reserved */ +/* 0x07 is reserved */ +#define WD719X_CMD_GET_PARAM 0x08 /* Get programmable parameters */ +#define WD719X_CMD_SET_PARAM 0x09 /* Set programmable parameters */ +#define WD719X_CMD_SLEEP 0x0a /* Put SPIDER to sleep */ +#define WD719X_CMD_READ_INIT 0x0b /* Read initialization parameters */ +#define WD719X_CMD_RESTORE_INIT 0x0c /* Restore initialization parameters */ +/* 0x0d is reserved */ +/* 0x0e is reserved */ +/* 0x0f is reserved */ +#define WD719X_CMD_ABORT_TAG 0x10 /* Send Abort tag message to target */ +#define WD719X_CMD_ABORT 0x11 /* Send Abort message to target */ +#define WD719X_CMD_RESET 0x12 /* Send Reset message to target */ +#define WD719X_CMD_INIT_SCAM 0x13 /* Initiate SCAM */ +#define WD719X_CMD_GET_SYNC 0x14 /* Get synchronous rates */ +#define WD719X_CMD_SET_SYNC 0x15 /* Set synchronous rates */ +#define WD719X_CMD_GET_WIDTH 0x16 /* Get SCSI bus width */ +#define WD719X_CMD_SET_WIDTH 0x17 /* Set SCSI bus width 
*/ +#define WD719X_CMD_GET_TAGS 0x18 /* Get tag flags */ +#define WD719X_CMD_SET_TAGS 0x19 /* Set tag flags */ +#define WD719X_CMD_GET_PARAM2 0x1a /* Get programmable params (format 2) */ +#define WD719X_CMD_SET_PARAM2 0x1b /* Set programmable params (format 2) */ +/* Commands with request pointers (mailbox) */ +#define WD719X_CMD_PROCESS_SCB 0x80 /* Process SCSI Control Block (SCB) */ +/* No interrupt generated on acceptance of SCB pointer */ + +/* interrupt status defines */ +#define WD719X_INT_NONE 0x00 /* No interrupt pending */ +#define WD719X_INT_NOERRORS 0x01 /* Command completed with no errors */ +#define WD719X_INT_LINKNOERRORS 0x02 /* link cmd completed with no errors */ +#define WD719X_INT_LINKNOSTATUS 0x03 /* link cmd completed with no flag set */ +#define WD719X_INT_ERRORSLOGGED 0x04 /* cmd completed with errors logged */ +#define WD719X_INT_SPIDERFAILED 0x05 /* cmd failed without valid SCSI status */ +#define WD719X_INT_BADINT 0x80 /* unsolicited interrupt */ +#define WD719X_INT_PIOREADY 0xf0 /* data ready for PIO output */ + +/* Spider Unique Error Codes (SUE) */ +#define WD719X_SUE_NOERRORS 0x00 /* No errors detected by SPIDER */ +#define WD719X_SUE_REJECTED 0x01 /* Command Rejected (bad opcode/param) */ +#define WD719X_SUE_SCBQFULL 0x02 /* SCB queue full */ +/* 0x03 is reserved */ +#define WD719X_SUE_TERM 0x04 /* Host terminated SCB via primative cmd */ +#define WD719X_SUE_CHAN1PAR 0x05 /* PCI Channel 1 parity error occurred */ +#define WD719X_SUE_CHAN1ABORT 0x06 /* PCI Channel 1 system abort occurred */ +#define WD719X_SUE_CHAN23PAR 0x07 /* PCI Channel 2/3 parity error occurred */ +#define WD719X_SUE_CHAN23ABORT 0x08 /* PCI Channel 2/3 system abort occurred */ +#define WD719X_SUE_TIMEOUT 0x10 /* Selection/reselection timeout */ +#define WD719X_SUE_RESET 0x11 /* SCSI bus reset occurred */ +#define WD719X_SUE_BUSERROR 0x12 /* SCSI bus error */ +#define WD719X_SUE_WRONGWAY 0x13 /* Wrong data transfer dir set by target */ +#define WD719X_SUE_BADPHASE 0x14 /* SCSI phase illegal or unexpected */ +#define WD719X_SUE_TOOLONG 0x15 /* target requested too much data */ +#define WD719X_SUE_BUSFREE 0x16 /* Unexpected SCSI bus free */ +#define WD719X_SUE_ARSDONE 0x17 /* Auto request sense executed */ +#define WD719X_SUE_IGNORED 0x18 /* SCSI message was ignored by target */ +#define WD719X_SUE_WRONGTAGS 0x19 /* Tagged SCB & tags off (or vice versa) */ +#define WD719X_SUE_BADTAGS 0x1a /* Wrong tag message type for target */ +#define WD719X_SUE_NOSCAMID 0x1b /* No SCAM soft ID available */ + +/* code sizes */ +#define WD719X_HASH_TABLE_SIZE 4096 + +/* Advanced Mode Registers */ +/* Regs 0x00..0x1f are for Advanced Mode of the card (RISC is running). */ +#define WD719X_AMR_COMMAND 0x00 +#define WD719X_AMR_CMD_PARAM 0x01 +#define WD719X_AMR_CMD_PARAM_2 0x02 +#define WD719X_AMR_CMD_PARAM_3 0x03 +#define WD719X_AMR_SCB_IN 0x04 + +#define WD719X_AMR_BIOS_SHARE_INT 0x0f + +#define WD719X_AMR_SCB_OUT 0x18 +#define WD719X_AMR_OP_CODE 0x1c +#define WD719X_AMR_SCSI_STATUS 0x1d +#define WD719X_AMR_SCB_ERROR 0x1e +#define WD719X_AMR_INT_STATUS 0x1f + +#define WD719X_DISABLE_INT 0x80 + +/* SCB flags */ +#define WD719X_SCB_FLAGS_CHECK_DIRECTION 0x01 +#define WD719X_SCB_FLAGS_PCI_TO_SCSI 0x02 +#define WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE 0x10 +#define WD719X_SCB_FLAGS_DO_SCATTER_GATHER 0x20 +#define WD719X_SCB_FLAGS_NO_DISCONNECT 0x40 + +/* PCI Registers used for reset, initial code download */ +/* Regs 0x20..0x3f are for Normal (DOS) mode (RISC is asleep). 
*/ +#define WD719X_PCI_GPIO_CONTROL 0x3C +#define WD719X_PCI_GPIO_DATA 0x3D +#define WD719X_PCI_PORT_RESET 0x3E +#define WD719X_PCI_MODE_SELECT 0x3F + +#define WD719X_PCI_EXTERNAL_ADDR 0x60 +#define WD719X_PCI_INTERNAL_ADDR 0x64 +#define WD719X_PCI_DMA_TRANSFER_SIZE 0x66 +#define WD719X_PCI_CHANNEL2_3CMD 0x68 +#define WD719X_PCI_CHANNEL2_3STATUS 0x69 + +#define WD719X_GPIO_ID_BITS 0x0a +#define WD719X_PRAM_BASE_ADDR 0x00 + +/* codes written to or read from the card */ +#define WD719X_PCI_RESET 0x01 +#define WD719X_ENABLE_ADVANCE_MODE 0x01 + +#define WD719X_START_CHANNEL2_3DMA 0x17 +#define WD719X_START_CHANNEL2_3DONE 0x01 +#define WD719X_START_CHANNEL2_3ABORT 0x20 + +/* 33C296 GPIO bits for EEPROM pins */ +#define WD719X_EE_DI (1 << 1) +#define WD719X_EE_CS (1 << 2) +#define WD719X_EE_CLK (1 << 3) +#define WD719X_EE_DO (1 << 4) + +/* EEPROM contents */ +struct wd719x_eeprom_header { + u8 sig1; + u8 sig2; + u8 version; + u8 checksum; + u8 cfg_offset; + u8 cfg_size; + u8 setup_offset; + u8 setup_size; +} __packed; + +#define WD719X_EE_SIG1 0 +#define WD719X_EE_SIG2 1 +#define WD719X_EE_VERSION 2 +#define WD719X_EE_CHECKSUM 3 +#define WD719X_EE_CFG_OFFSET 4 +#define WD719X_EE_CFG_SIZE 5 +#define WD719X_EE_SETUP_OFFSET 6 +#define WD719X_EE_SETUP_SIZE 7 + +#define WD719X_EE_SCSI_ID_MASK 0xf + +/* SPIDER Host Parameters Block (=EEPROM configuration block) */ +struct wd719x_host_param { + u8 ch_1_th; /* FIFO threshold */ + u8 scsi_conf; /* SCSI configuration */ + u8 own_scsi_id; /* controller SCSI ID */ + u8 sel_timeout; /* selection timeout*/ + u8 sleep_timer; /* seep timer */ + __le16 cdb_size;/* CDB size groups */ + __le16 tag_en; /* Tag msg enables (ID 0-15) */ + u8 scsi_pad; /* SCSI pad control */ + __le32 wide; /* WIDE msg options (ID 0-15) */ + __le32 sync; /* SYNC msg options (ID 0-15) */ + u8 soft_mask; /* soft error mask */ + u8 unsol_mask; /* unsolicited error mask */ +} __packed; + +#endif /* _WD719X_H_ */ diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index ab3ab27d49b7..4d1b7224a7f2 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -110,58 +110,6 @@ static struct device_driver tcm_loop_driverfs = { */ struct device *tcm_loop_primary; -/* - * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and - * drivers/scsi/libiscsi.c:iscsi_change_queue_depth() - */ -static int tcm_loop_change_queue_depth( - struct scsi_device *sdev, - int depth, - int reason) -{ - switch (reason) { - case SCSI_QDEPTH_DEFAULT: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - case SCSI_QDEPTH_QFULL: - scsi_track_queue_full(sdev, depth); - break; - case SCSI_QDEPTH_RAMP_UP: - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - break; - default: - return -EOPNOTSUPP; - } - return sdev->queue_depth; -} - -static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag) -{ - if (sdev->tagged_supported) { - scsi_set_tag_type(sdev, tag); - - if (tag) - scsi_activate_tcq(sdev, sdev->queue_depth); - else - scsi_deactivate_tcq(sdev, sdev->queue_depth); - } else - tag = 0; - - return tag; -} - -/* - * Locate the SAM Task Attr from struct scsi_cmnd * - */ -static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag) -{ - if (sc->device->tagged_supported && - sc->device->ordered_tags && tag >= 0) - return MSG_ORDERED_TAG; - - return MSG_SIMPLE_TAG; -} - static void tcm_loop_submission_work(struct work_struct *work) { struct tcm_loop_cmd *tl_cmd = @@ -220,7 +168,7 @@ static void 
tcm_loop_submission_work(struct work_struct *work) rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, - transfer_length, tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag), + transfer_length, MSG_SIMPLE_TAG, sc->sc_data_direction, 0, scsi_sglist(sc), scsi_sg_count(sc), sgl_bidi, sgl_bidi_count, @@ -431,27 +379,13 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd) return 0; } -static int tcm_loop_slave_configure(struct scsi_device *sd) -{ - if (sd->tagged_supported) { - scsi_activate_tcq(sd, sd->queue_depth); - scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG, - sd->host->cmd_per_lun); - } else { - scsi_adjust_queue_depth(sd, 0, - sd->host->cmd_per_lun); - } - - return 0; -} - static struct scsi_host_template tcm_loop_driver_template = { .show_info = tcm_loop_show_info, .proc_name = "tcm_loopback", .name = "TCM_Loopback", .queuecommand = tcm_loop_queuecommand, - .change_queue_depth = tcm_loop_change_queue_depth, - .change_queue_type = tcm_loop_change_queue_type, + .change_queue_depth = scsi_change_queue_depth, + .change_queue_type = scsi_change_queue_type, .eh_abort_handler = tcm_loop_abort_task, .eh_device_reset_handler = tcm_loop_device_reset, .eh_target_reset_handler = tcm_loop_target_reset, @@ -462,8 +396,9 @@ static struct scsi_host_template tcm_loop_driver_template = { .max_sectors = 0xFFFF, .use_clustering = DISABLE_CLUSTERING, .slave_alloc = tcm_loop_slave_alloc, - .slave_configure = tcm_loop_slave_configure, .module = THIS_MODULE, + .use_blk_tags = 1, + .track_queue_depth = 1, }; static int tcm_loop_driver_probe(struct device *dev) diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fb87780929d2..75cbde1f7c5b 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -576,7 +576,7 @@ static inline int core_alua_state_standby( case SEND_DIAGNOSTIC: case READ_CAPACITY: return 0; - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: switch (cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: return 0; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 9f93b8234095..4c261c33cf55 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -459,7 +459,7 @@ static int core_scsi3_pr_seq_non_holder( case ACCESS_CONTROL_OUT: case INQUIRY: case LOG_SENSE: - case READ_MEDIA_SERIAL_NUMBER: + case SERVICE_ACTION_IN_12: case REPORT_LUNS: case REQUEST_SENSE: case PERSISTENT_RESERVE_IN: diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index ebe62afb957d..8d171ff77e75 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -852,7 +852,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) size = READ_CAP_LEN; cmd->execute_cmd = sbc_emulate_readcapacity; break; - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: switch (cmd->t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: cmd->execute_cmd = sbc_emulate_readcapacity_16; diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c index 6cdb7a534f23..024f58475a94 100644 --- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c +++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c @@ -912,7 +912,7 @@ static int get_cmd_dir(const unsigned char *cdb) case INQUIRY: case MODE_SENSE: case MODE_SENSE_10: - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: case MAINTENANCE_IN: case PERSISTENT_RESERVE_IN: case SECURITY_PROTOCOL_IN: diff --git a/drivers/usb/storage/debug.c 
b/drivers/usb/storage/debug.c index e08f64780e30..66a684a29938 100644 --- a/drivers/usb/storage/debug.c +++ b/drivers/usb/storage/debug.c @@ -164,10 +164,10 @@ void usb_stor_show_sense(const struct us_data *us, unsigned char asc, unsigned char ascq) { - const char *what, *keystr; + const char *what, *keystr, *fmt; keystr = scsi_sense_key_string(key); - what = scsi_extd_sense_format(asc, ascq); + what = scsi_extd_sense_format(asc, ascq, &fmt); if (keystr == NULL) keystr = "(Unknown Key)"; @@ -175,8 +175,10 @@ void usb_stor_show_sense(const struct us_data *us, what = "(unknown ASC/ASCQ)"; usb_stor_dbg(us, "%s: ", keystr); - US_DEBUGPX(what, ascq); - US_DEBUGPX("\n"); + if (fmt) + US_DEBUGPX("%s (%s%x)\n", what, fmt, ascq); + else + US_DEBUGPX("%s\n", what); } int usb_stor_dbg(const struct us_data *us, const char *fmt, ...) diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 89b24349269e..4047edfb64e1 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -181,7 +181,7 @@ static int uas_get_tag(struct scsi_cmnd *cmnd) { int tag; - if (blk_rq_tagged(cmnd->request)) + if (cmnd->flags & SCMD_TAGGED) tag = cmnd->request->tag + 2; else tag = 1; @@ -799,8 +799,7 @@ static int uas_slave_configure(struct scsi_device *sdev) if (devinfo->flags & US_FL_NO_REPORT_OPCODES) sdev->no_report_opcodes = 1; - scsi_set_tag_type(sdev, MSG_ORDERED_TAG); - scsi_activate_tcq(sdev, devinfo->qdepth - 2); + scsi_change_queue_depth(sdev, devinfo->qdepth - 2); return 0; } @@ -817,7 +816,6 @@ static struct scsi_host_template uas_host_template = { .sg_tablesize = SG_NONE, .cmd_per_lun = 1, /* until we override it */ .skip_settle_delay = 1, - .ordered_tag = 1, /* * The uas drivers expects tags not to be bigger than the maximum @@ -825,6 +823,7 @@ static struct scsi_host_template uas_host_template = { * allocator. 
*/ .disable_blk_mq = true, + .use_blk_tags = 1, }; #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 3e32146472a5..50610a6acf3d 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -274,10 +274,6 @@ static void scsiback_print_status(char *sense_buffer, int errors, tpg->tport->tport_name, pending_req->v2p->lun, pending_req->cmnd[0], status_byte(errors), msg_byte(errors), host_byte(errors), driver_byte(errors)); - - if (CHECK_CONDITION & status_byte(errors)) - __scsi_print_sense("xen-pvscsi", sense_buffer, - SCSI_SENSE_BUFFERSIZE); } static void scsiback_fast_flush_area(struct vscsibk_pend *req) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c9be1589415a..15f7034aa377 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -167,6 +167,23 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, +}; + +u32 blk_mq_unique_tag(struct request *rq); + +static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) +{ + return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; +} + +static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) +{ + return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; +} + struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aac0f9ea952a..6d76b8b4aa2b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1136,7 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) /* * tag stuff */ -#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); extern void blk_queue_end_tag(struct request_queue *, struct request *); diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index e50f98b0297a..eb0b1988050a 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, u16 *data); extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, __le16 *data, const u16 words); +extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data); +extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data, const u16 bytes); extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); diff --git a/include/linux/libata.h b/include/linux/libata.h index bd5fefeaf548..bfbc817c34ee 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1191,9 +1191,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); extern int ata_scsi_slave_config(struct scsi_device *sdev); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, - int queue_depth, int reason); + int queue_depth); extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, - int queue_depth, int reason); + int queue_depth); extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int 
ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 52beadf9a29b..93d14daf0994 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -1105,8 +1105,6 @@ int fc_eh_abort(struct scsi_cmnd *); int fc_eh_device_reset(struct scsi_cmnd *); int fc_eh_host_reset(struct scsi_cmnd *); int fc_slave_alloc(struct scsi_device *); -int fc_change_queue_depth(struct scsi_device *, int qdepth, int reason); -int fc_change_queue_type(struct scsi_device *, int tag_type); /* * ELS/CT interface diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 728c9ad9feb0..4d1c46aac331 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -378,8 +378,6 @@ struct iscsi_host { /* * scsi host template */ -extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, - int reason); extern int iscsi_eh_abort(struct scsi_cmnd *sc); extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index ef7872c20da9..832dcc9f86ec 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -365,12 +365,6 @@ struct asd_sas_phy { struct scsi_core { struct Scsi_Host *shost; - struct mutex task_queue_flush; - spinlock_t task_queue_lock; - struct list_head task_queue; - int task_queue_size; - - struct task_struct *queue_thread; }; struct sas_ha_event { @@ -422,9 +416,6 @@ struct sas_ha_struct { struct asd_sas_port **sas_port; /* array of valid pointers, must be set */ int num_phys; /* must be set, gt 0, static */ - /* The class calls this to send a task for execution. */ - int lldd_max_execute_num; - int lldd_queue_size; int strict_wide_ports; /* both sas_addr and attached_sas_addr must match * their siblings when forming wide ports */ @@ -612,7 +603,6 @@ struct sas_ssp_task { struct sas_task { struct domain_device *dev; - struct list_head list; spinlock_t task_state_lock; unsigned task_state_flags; @@ -665,8 +655,7 @@ struct sas_domain_function_template { int (*lldd_dev_found)(struct domain_device *); void (*lldd_dev_gone)(struct domain_device *); - int (*lldd_execute_task)(struct sas_task *, int num, - gfp_t gfp_flags); + int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags); /* Task Management Functions. Must be called from process context. 
*/ int (*lldd_abort_task)(struct sas_task *); @@ -700,12 +689,10 @@ extern void sas_suspend_ha(struct sas_ha_struct *sas_ha); int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates); int sas_phy_reset(struct sas_phy *phy, int hard_reset); -int sas_queue_up(struct sas_task *task); extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *); extern int sas_target_alloc(struct scsi_target *); extern int sas_slave_configure(struct scsi_device *); -extern int sas_change_queue_depth(struct scsi_device *, int new_depth, - int reason); +extern int sas_change_queue_depth(struct scsi_device *, int new_depth); extern int sas_change_queue_type(struct scsi_device *, int qt); extern int sas_bios_param(struct scsi_device *, struct block_device *, diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index d17178e6fcdd..8a7f8ad58aac 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -128,8 +128,10 @@ enum scsi_timeouts { #define MOVE_MEDIUM 0xa5 #define EXCHANGE_MEDIUM 0xa6 #define READ_12 0xa8 +#define SERVICE_ACTION_OUT_12 0xa9 #define WRITE_12 0xaa -#define READ_MEDIA_SERIAL_NUMBER 0xab +#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */ +#define SERVICE_ACTION_IN_12 0xab #define WRITE_VERIFY_12 0xae #define VERIFY_12 0xaf #define SEARCH_HIGH_12 0xb0 @@ -151,7 +153,9 @@ enum scsi_timeouts { #define VERIFY_16 0x8f #define SYNCHRONIZE_CACHE_16 0x91 #define WRITE_SAME_16 0x93 -#define SERVICE_ACTION_IN 0x9e +#define SERVICE_ACTION_BIDIRECTIONAL 0x9d +#define SERVICE_ACTION_IN_16 0x9e +#define SERVICE_ACTION_OUT_16 0x9f /* values for service action in */ #define SAI_READ_CAPACITY_16 0x10 #define SAI_GET_LBA_STATUS 0x12 @@ -165,8 +169,8 @@ enum scsi_timeouts { #define MI_REPORT_ALIASES 0x0b #define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c #define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d -#define MI_REPORT_PRIORITY 0x0e -#define MI_REPORT_TIMESTAMP 0x0f +#define MI_REPORT_PRIORITY 0x0e +#define MI_REPORT_TIMESTAMP 0x0f #define MI_MANAGEMENT_PROTOCOL_IN 0x10 /* value for MI_REPORT_TARGET_PGS ext header */ #define MI_EXT_HDR_PARAM_FMT 0x20 diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 522a5f27f553..9fc1aecfc813 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -53,6 +53,9 @@ struct scsi_pointer { volatile int phase; }; +/* for scmd->flags */ +#define SCMD_TAGGED (1 << 0) + struct scsi_cmnd { struct scsi_device *device; struct list_head list; /* scsi_cmnd participates in queue lists */ @@ -132,6 +135,7 @@ struct scsi_cmnd { * to be at an address < 16Mb). 
*/ int result; /* Status code from lower level driver */ + int flags; /* Command flags */ unsigned char tag; /* SCSI-II queued command tag */ }; @@ -159,7 +163,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, size_t *offset, size_t *len); extern void scsi_kunmap_atomic_sg(void *virt); -extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask); +extern int scsi_init_io(struct scsi_cmnd *cmd); extern int scsi_dma_map(struct scsi_cmnd *cmd); extern void scsi_dma_unmap(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h index e89844cc2cd3..7982795df595 100644 --- a/include/scsi/scsi_dbg.h +++ b/include/scsi/scsi_dbg.h @@ -2,23 +2,27 @@ #define _SCSI_SCSI_DBG_H struct scsi_cmnd; +struct scsi_device; struct scsi_sense_hdr; extern void scsi_print_command(struct scsi_cmnd *); -extern void __scsi_print_command(unsigned char *); -extern void scsi_show_extd_sense(unsigned char, unsigned char); -extern void scsi_show_sense_hdr(struct scsi_sense_hdr *); -extern void scsi_print_sense_hdr(const char *, struct scsi_sense_hdr *); -extern void scsi_cmd_print_sense_hdr(struct scsi_cmnd *, const char *, - struct scsi_sense_hdr *); -extern void scsi_print_sense(char *, struct scsi_cmnd *); -extern void __scsi_print_sense(const char *name, +extern void __scsi_print_command(const unsigned char *, size_t); +extern void scsi_show_extd_sense(const struct scsi_device *, const char *, + unsigned char, unsigned char); +extern void scsi_show_sense_hdr(const struct scsi_device *, const char *, + const struct scsi_sense_hdr *); +extern void scsi_print_sense_hdr(const struct scsi_device *, const char *, + const struct scsi_sense_hdr *); +extern void scsi_print_sense(const struct scsi_cmnd *); +extern void __scsi_print_sense(const struct scsi_device *, const char *name, const unsigned char *sense_buffer, int sense_len); -extern void scsi_show_result(int); -extern void scsi_print_result(struct scsi_cmnd *); -extern void scsi_print_status(unsigned char); +extern void scsi_print_result(struct scsi_cmnd *, const char *, int); +extern const char *scsi_hostbyte_string(int); +extern const char *scsi_driverbyte_string(int); +extern const char *scsi_mlreturn_string(int); extern const char *scsi_sense_key_string(unsigned char); -extern const char *scsi_extd_sense_format(unsigned char, unsigned char); +extern const char *scsi_extd_sense_format(unsigned char, unsigned char, + const char **); #endif /* _SCSI_SCSI_DBG_H */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 27ecee73bd72..6364e23454dd 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -141,7 +141,6 @@ struct scsi_device { unsigned ppr:1; /* Device supports PPR messages */ unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */ unsigned simple_tags:1; /* simple queue tag messages are enabled */ - unsigned ordered_tags:1;/* ordered queue tag messages are enabled */ unsigned was_reset:1; /* There was a bus reset on the bus for * this device */ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN @@ -201,11 +200,6 @@ struct scsi_device { unsigned long sdev_data[0]; } __attribute__((aligned(sizeof(unsigned long)))); -struct scsi_dh_devlist { - char *vendor; - char *model; -}; - typedef void (*activate_complete)(void *, int); struct scsi_device_handler { /* Used by the infrastructure */ @@ -214,9 +208,8 @@ struct scsi_device_handler { /* Filled by the hardware handler */ struct module *module; const char *name; - const struct 
scsi_dh_devlist *devlist; int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); - int (*attach)(struct scsi_device *); + struct scsi_dh_data *(*attach)(struct scsi_device *); void (*detach)(struct scsi_device *); int (*activate)(struct scsi_device *, activate_complete, void *); int (*prep_fn)(struct scsi_device *, struct request *); @@ -228,7 +221,6 @@ struct scsi_dh_data { struct scsi_device_handler *scsi_dh; struct scsi_device *sdev; struct kref kref; - char buf[0]; }; #define to_scsi_device(d) \ @@ -244,6 +236,15 @@ struct scsi_dh_data { #define sdev_dbg(sdev, fmt, a...) \ dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) +/* + * like scmd_printk, but the device name is passed in + * as a string pointer + */ +#define sdev_prefix_printk(l, sdev, p, fmt, a...) \ + (p) ? \ + sdev_printk(l, sdev, "[%s] " fmt, p, ##a) : \ + sdev_printk(l, sdev, fmt, ##a) + #define scmd_printk(prefix, scmd, fmt, a...) \ (scmd)->request->rq_disk ? \ sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \ @@ -379,7 +380,7 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, #define __shost_for_each_device(sdev, shost) \ list_for_each_entry((sdev), &((shost)->__devices), siblings) -extern void scsi_adjust_queue_depth(struct scsi_device *, int, int); +extern int scsi_change_queue_depth(struct scsi_device *, int); extern int scsi_track_queue_full(struct scsi_device *, int); extern int scsi_set_medium_removal(struct scsi_device *, char); diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h index c2b759809d8a..891a658aa867 100644 --- a/include/scsi/scsi_driver.h +++ b/include/scsi/scsi_driver.h @@ -9,7 +9,6 @@ struct scsi_cmnd; struct scsi_device; struct scsi_driver { - struct module *owner; struct device_driver gendrv; void (*rescan)(struct device *); diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 06a8790893ef..1e1421b06565 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h @@ -27,10 +27,10 @@ struct scsi_sense_hdr { /* See SPC-3 section 4.5 */ u8 additional_length; /* always 0 for fixed sense format */ }; -static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr) +static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr) { if (!sshdr) - return 0; + return false; return (sshdr->response_code & 0x70) == 0x70; } @@ -42,12 +42,12 @@ extern void scsi_eh_flush_done_q(struct list_head *done_q); extern void scsi_report_bus_reset(struct Scsi_Host *, int); extern void scsi_report_device_reset(struct Scsi_Host *, int, int); extern int scsi_block_when_processing_errors(struct scsi_device *); -extern int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, - struct scsi_sense_hdr *sshdr); -extern int scsi_command_normalize_sense(struct scsi_cmnd *cmd, - struct scsi_sense_hdr *sshdr); +extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, + struct scsi_sense_hdr *sshdr); +extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, + struct scsi_sense_hdr *sshdr); -static inline int scsi_sense_is_deferred(struct scsi_sense_hdr *sshdr) +static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr) { return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1)); } @@ -60,15 +60,7 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); -/* - * Reset request from external source - */ -#define SCSI_TRY_RESET_DEVICE 1 -#define SCSI_TRY_RESET_BUS 2 -#define SCSI_TRY_RESET_HOST 3 -#define 
SCSI_TRY_RESET_TARGET 4 - -extern int scsi_reset_provider(struct scsi_device *, int); +extern int scsi_ioctl_reset(struct scsi_device *, int __user *); struct scsi_eh_save { /* saved state */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 5e362489ee88..c8a462ef9a4e 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -46,12 +46,6 @@ struct blk_queue_tags; #define DISABLE_CLUSTERING 0 #define ENABLE_CLUSTERING 1 -enum { - SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */ - SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */ - SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshold event */ -}; - struct scsi_host_template { struct module *module; const char *name; @@ -195,7 +189,7 @@ struct scsi_host_template { * Things currently recommended to be handled at this time include: * * 1. Setting the device queue depth. Proper setting of this is - * described in the comments for scsi_adjust_queue_depth. + * described in the comments for scsi_change_queue_depth. * 2. Determining if the device supports the various synchronous * negotiation protocols. The device struct will already have * responded to INQUIRY and the results of the standard items @@ -281,7 +275,7 @@ struct scsi_host_template { * * Status: OPTIONAL */ - int (* change_queue_depth)(struct scsi_device *, int, int); + int (* change_queue_depth)(struct scsi_device *, int); /* * Fill in this function to allow the changing of tag types @@ -422,6 +416,16 @@ struct scsi_host_template { unsigned char present; /* + * Let the block layer assigns tags to all commands. + */ + unsigned use_blk_tags:1; + + /* + * Track QUEUE_FULL events and reduce queue depth on demand. + */ + unsigned track_queue_depth:1; + + /* * This specifies the mode that a LLD supports. */ unsigned supported_mode:2; @@ -451,11 +455,6 @@ struct scsi_host_template { */ unsigned skip_settle_delay:1; - /* - * True if we are using ordered write support. - */ - unsigned ordered_tag:1; - /* True if the controller does not support WRITE SAME */ unsigned no_write_same:1; @@ -638,6 +637,14 @@ struct Scsi_Host { short unsigned int sg_prot_tablesize; unsigned int max_sectors; unsigned long dma_boundary; + /* + * In scsi-mq mode, the number of hardware queues supported by the LLD. + * + * Note: it is assumed that each hardware queue has a queue depth of + * can_queue. In other words, the total queue depth per host + * is nr_hw_queues * can_queue. + */ + unsigned nr_hw_queues; /* * Used to assign serial numbers to the cmds. * Protected by the host lock. @@ -647,7 +654,6 @@ struct Scsi_Host { unsigned active_mode:2; unsigned unchecked_isa_dma:1; unsigned use_clustering:1; - unsigned use_blk_tcq:1; /* * Host has requested that no further requests come through for the @@ -662,11 +668,6 @@ struct Scsi_Host { */ unsigned reverse_ordering:1; - /* - * Ordered write support - */ - unsigned ordered_tag:1; - /* Task mgmt function in progress */ unsigned tmf_in_progress:1; diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h index b9006848b813..8d19d1d233c3 100644 --- a/include/scsi/scsi_ioctl.h +++ b/include/scsi/scsi_ioctl.h @@ -40,9 +40,9 @@ typedef struct scsi_fctargaddress { unsigned char host_wwn[8]; // include NULL term. 
} Scsi_FCTargAddress; +int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, + int cmd, bool ndelay); extern int scsi_ioctl(struct scsi_device *, int, void __user *); -extern int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, - void __user *arg, int ndelay); #endif /* __KERNEL__ */ #endif /* _SCSI_IOCTL_H */ diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index 56ed843969ca..fe4a70299419 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -16,20 +16,16 @@ #ifdef CONFIG_BLOCK +int scsi_change_queue_type(struct scsi_device *sdev, int tag_type); + /** * scsi_get_tag_type - get the type of tag the device supports * @sdev: the scsi device - * - * Notes: - * If the drive only supports simple tags, returns MSG_SIMPLE_TAG - * if it supports all tag types, returns MSG_ORDERED_TAG. */ static inline int scsi_get_tag_type(struct scsi_device *sdev) { if (!sdev->tagged_supported) return 0; - if (sdev->ordered_tags) - return MSG_ORDERED_TAG; if (sdev->simple_tags) return MSG_SIMPLE_TAG; return 0; @@ -39,90 +35,33 @@ static inline void scsi_set_tag_type(struct scsi_device *sdev, int tag) { switch (tag) { case MSG_ORDERED_TAG: - sdev->ordered_tags = 1; - /* fall through */ case MSG_SIMPLE_TAG: sdev->simple_tags = 1; break; case 0: /* fall through */ default: - sdev->ordered_tags = 0; sdev->simple_tags = 0; break; } } -/** - * scsi_activate_tcq - turn on tag command queueing - * @SDpnt: device to turn on TCQ for - * @depth: queue depth - * - * Notes: - * Eventually, I hope depth would be the maximum depth - * the device could cope with and the real queue depth - * would be adjustable from 0 to depth. - **/ -static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) -{ - if (!sdev->tagged_supported) - return; - - if (shost_use_blk_mq(sdev->host)) - queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, sdev->request_queue); - else if (!blk_queue_tagged(sdev->request_queue)) - blk_queue_init_tags(sdev->request_queue, depth, - sdev->host->bqt); - - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); -} - -/** - * scsi_deactivate_tcq - turn off tag command queueing - * @SDpnt: device to turn off TCQ for - **/ -static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) -{ - if (blk_queue_tagged(sdev->request_queue)) - blk_queue_free_tags(sdev->request_queue); - scsi_adjust_queue_depth(sdev, 0, depth); -} - -/** - * scsi_populate_tag_msg - place a tag message in a buffer - * @SCpnt: pointer to the Scsi_Cmnd for the tag - * @msg: pointer to the area to place the tag - * - * Notes: - * designed to create the correct type of tag message for the - * particular request. Returns the size of the tag message. - * May return 0 if TCQ is disabled for this device. - **/ -static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg) -{ - struct request *req = cmd->request; - - if (blk_rq_tagged(req)) { - *msg++ = MSG_SIMPLE_TAG; - *msg++ = req->tag; - return 2; - } - - return 0; -} static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost, - unsigned int hw_ctx, int tag) + int unique_tag) { - struct request *req; + u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag); + struct request *req = NULL; - req = blk_mq_tag_to_rq(shost->tag_set.tags[hw_ctx], tag); + if (hwq < shost->tag_set.nr_hw_queues) + req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], + blk_mq_unique_tag_to_tag(unique_tag)); return req ? 
(struct scsi_cmnd *)req->special : NULL; } /** * scsi_find_tag - find a tagged command by device * @SDpnt: pointer to the ScSI device - * @tag: the tag number + * @tag: tag generated by blk_mq_unique_tag() * * Notes: * Only works with tags allocated by the generic blk layer. @@ -133,9 +72,9 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag) if (tag != SCSI_NO_TAG) { if (shost_use_blk_mq(sdev->host)) - return scsi_mq_find_tag(sdev->host, 0, tag); + return scsi_mq_find_tag(sdev->host, tag); - req = blk_queue_find_tag(sdev->request_queue, tag); + req = blk_queue_find_tag(sdev->request_queue, tag); return req ? (struct scsi_cmnd *)req->special : NULL; } @@ -174,7 +113,7 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth) /** * scsi_host_find_tag - find the tagged command by host * @shost: pointer to scsi_host - * @tag: tag of the scsi_cmnd + * @tag: tag generated by blk_mq_unique_tag() * * Notes: * Only works with tags allocated by the generic blk layer. @@ -186,7 +125,7 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost, if (tag != SCSI_NO_TAG) { if (shost_use_blk_mq(shost)) - return scsi_mq_find_tag(shost, 0, tag); + return scsi_mq_find_tag(shost, tag); req = blk_map_queue_find_tag(shost->bqt, tag); return req ? (struct scsi_cmnd *)req->special : NULL; } diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h index 7497a383b1a4..a4fa52b4d5c5 100644 --- a/include/scsi/scsi_transport_spi.h +++ b/include/scsi/scsi_transport_spi.h @@ -157,5 +157,6 @@ int spi_populate_width_msg(unsigned char *msg, int width); int spi_populate_sync_msg(unsigned char *msg, int period, int offset); int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width, int options); +int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd); #endif /* SCSI_TRANSPORT_SPI_H */ diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 750e5db7c6bf..3afec7032448 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h @@ -164,12 +164,15 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ /* Returns -EBUSY if occupied. 3rd argument pointer to int (see next) */ #define SG_SCSI_RESET 0x2284 -/* Associated values that can be given to SG_SCSI_RESET follow */ +/* Associated values that can be given to SG_SCSI_RESET follow. + * SG_SCSI_RESET_NO_ESCALATE may be OR-ed to the _DEVICE, _TARGET, _BUS + * or _HOST reset value so only that action is attempted. 
*/ #define SG_SCSI_RESET_NOTHING 0 #define SG_SCSI_RESET_DEVICE 1 #define SG_SCSI_RESET_BUS 2 #define SG_SCSI_RESET_HOST 3 #define SG_SCSI_RESET_TARGET 4 +#define SG_SCSI_RESET_NO_ESCALATE 0x100 /* synchronous SCSI command ioctl, (only in version 3 interface) */ #define SG_IO 0x2285 /* similar effect as write() followed by read() */ diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index db6c93510f74..079bd10a01b4 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -94,7 +94,7 @@ scsi_opcode_name(WRITE_16), \ scsi_opcode_name(VERIFY_16), \ scsi_opcode_name(WRITE_SAME_16), \ - scsi_opcode_name(SERVICE_ACTION_IN), \ + scsi_opcode_name(SERVICE_ACTION_IN_16), \ scsi_opcode_name(SAI_READ_CAPACITY_16), \ scsi_opcode_name(SAI_GET_LBA_STATUS), \ scsi_opcode_name(MI_REPORT_TARGET_PGS), \ diff --git a/include/trace/events/target.h b/include/trace/events/target.h index da9cc0f05c93..45403443dd82 100644 --- a/include/trace/events/target.h +++ b/include/trace/events/target.h @@ -96,7 +96,7 @@ scsi_opcode_name(WRITE_16), \ scsi_opcode_name(VERIFY_16), \ scsi_opcode_name(WRITE_SAME_16), \ - scsi_opcode_name(SERVICE_ACTION_IN), \ + scsi_opcode_name(SERVICE_ACTION_IN_16), \ scsi_opcode_name(SAI_READ_CAPACITY_16), \ scsi_opcode_name(SAI_GET_LBA_STATUS), \ scsi_opcode_name(MI_REPORT_TARGET_PGS), \ diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugin_scsi.c index eda326fc8620..3727de48c8d5 100644 --- a/tools/lib/traceevent/plugin_scsi.c +++ b/tools/lib/traceevent/plugin_scsi.c @@ -85,8 +85,9 @@ typedef unsigned int u32; #define MOVE_MEDIUM 0xa5 #define EXCHANGE_MEDIUM 0xa6 #define READ_12 0xa8 +#define SERVICE_ACTION_OUT_12 0xa9 #define WRITE_12 0xaa -#define READ_MEDIA_SERIAL_NUMBER 0xab +#define SERVICE_ACTION_IN_12 0xab #define WRITE_VERIFY_12 0xae #define VERIFY_12 0xaf #define SEARCH_HIGH_12 0xb0 @@ -107,7 +108,9 @@ typedef unsigned int u32; #define VERIFY_16 0x8f #define SYNCHRONIZE_CACHE_16 0x91 #define WRITE_SAME_16 0x93 -#define SERVICE_ACTION_IN 0x9e +#define SERVICE_ACTION_BIDIRECTIONAL 0x9d +#define SERVICE_ACTION_IN_16 0x9e +#define SERVICE_ACTION_OUT_16 0x9f /* values for service action in */ #define SAI_READ_CAPACITY_16 0x10 #define SAI_GET_LBA_STATUS 0x12 @@ -393,7 +396,7 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len) return scsi_trace_rw16(p, cdb, len); case UNMAP: return scsi_trace_unmap(p, cdb, len); - case SERVICE_ACTION_IN: + case SERVICE_ACTION_IN_16: return scsi_trace_service_action_in(p, cdb, len); case VARIABLE_LENGTH_CMD: return scsi_trace_varlen(p, cdb, len); |
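
The blk-mq.h hunk above adds blk_mq_unique_tag() together with the blk_mq_unique_tag_to_hwq()/blk_mq_unique_tag_to_tag() helpers, and the scsi_tcq.h hunk changes scsi_host_find_tag()/scsi_find_tag() to take such a unique tag. A minimal sketch of how a multiqueue LLDD would be expected to use this pairing follows; the my_* names are hypothetical and only illustrate the intended call flow, they are not part of this series.

static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/* 32-bit tag: hardware queue index in the top 16 bits, per-queue tag below */
	u32 unique_tag = blk_mq_unique_tag(cmd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	u16 hw_tag = blk_mq_unique_tag_to_tag(unique_tag);

	/* hypothetical hardware submission keyed by (hwq, hw_tag) */
	return my_hw_submit(shost, cmd, hwq, hw_tag, unique_tag);
}

static void my_hw_complete(struct Scsi_Host *shost, u32 unique_tag, int result)
{
	/* scsi_host_find_tag() now splits the unique tag back into hwq + tag */
	struct scsi_cmnd *cmd = scsi_host_find_tag(shost, unique_tag);

	if (!cmd)
		return;
	cmd->result = result;
	cmd->scsi_done(cmd);
}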
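
Similarly, the sg.h hunk documents that SG_SCSI_RESET_NO_ESCALATE may be OR-ed into one of the SG_SCSI_RESET_* values so that only the requested reset is attempted, without escalation. A short user-space sketch, assuming a hypothetical /dev/sg0 device node passed in by the caller:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Request a device reset only; do not escalate to target/bus/host reset. */
int sg_reset_device_no_escalate(const char *sg_dev)
{
	int val = SG_SCSI_RESET_DEVICE | SG_SCSI_RESET_NO_ESCALATE;
	int fd = open(sg_dev, O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return -1;
	/* third argument is a pointer to int; fails with EBUSY if occupied */
	if (ioctl(fd, SG_SCSI_RESET, &val) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

Called as, e.g., sg_reset_device_no_escalate("/dev/sg0"); the device path is only an example.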