path: root/drivers
author     Linus Torvalds <torvalds@linux-foundation.org>  2011-11-01 15:02:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-01 15:02:38 -0700
commit     c18ae42aa5b3473c00f51b6937d0c00bdb6ce2d2 (patch)
tree       327ca3f9c0a7eae6031eb1b610d48efa0940db87 /drivers
parent     1c398651518c7e25a9fb3f08b456c73d5ca22469 (diff)
parent     a572e688cf5d99d2382016c7241ec37b523b0137 (diff)
download   linux-c18ae42aa5b3473c00f51b6937d0c00bdb6ce2d2.tar.bz2
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394:
  firewire: ohci: fix isochronous DMA synchronization
  firewire: ohci: work around selfID junk due to wrong gap count
  firewire: net: Use posted writes
  firewire: use clamp and min3 macros
  firewire: ohci: optimize TSB41BA3D detection
  firewire: ohci: TSB41BA3D support tweaks
  firewire: ohci: Add support for TSB41BA3D phy
  firewire: ohci: Move code from the bus reset tasklet into a workqueue
  firewire: sbp2: fold two functions into one
  firewire: sbp2: move some code to more sensible places
  firewire: sbp2: remove obsolete reference counting
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/firewire/core-transaction.c    4
-rw-r--r--  drivers/firewire/net.c                15
-rw-r--r--  drivers/firewire/ohci.c              255
-rw-r--r--  drivers/firewire/sbp2.c              260
4 files changed, 362 insertions, 172 deletions
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 334b82a3542c..855ab3f5936f 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1046,8 +1046,8 @@ static void update_split_timeout(struct fw_card *card)
cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
- cycles = max(cycles, 800u); /* minimum as per the spec */
- cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
+ /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
+ cycles = clamp(cycles, 800u, 3u * 8000u);
card->split_timeout_cycles = cycles;
card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
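
The hunk above keeps the split timeout in IEEE 1394 cycle units (8000 cycles per second) and now bounds it with a single clamp(): 800 cycles (100 ms) is the minimum the spec permits, 3 * 8000 cycles (3 s) is the largest value that does not overflow the OHCI timeout. For illustration, a minimal userspace sketch of the same arithmetic; HZ, the register values, and the helper macros are local stand-ins, not the driver's definitions:

    /* sketch only: clamp(v, lo, hi) == min(max(v, lo), hi) */
    #include <stdio.h>

    #define HZ 250
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define clamp(val, lo, hi)  ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

    int main(void)
    {
        /* SPLIT_TIMEOUT_HI counts seconds, _LO keeps cycles in bits 31..19 */
        unsigned int split_timeout_hi = 0;
        unsigned int split_timeout_lo = 800u << 19;   /* 800 cycles = 100 ms */
        unsigned int cycles;

        cycles = split_timeout_hi * 8000 + (split_timeout_lo >> 19);
        cycles = clamp(cycles, 800u, 3u * 8000u);     /* 100 ms ... 3 s */

        printf("cycles=%u jiffies=%u\n", cycles, DIV_ROUND_UP(cycles * HZ, 8000));
        return 0;
    }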
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 03a7a85d0424..a20f45b1e7e5 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -502,11 +502,7 @@ static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
{
max_rec = min(max_rec, speed + 8);
- max_rec = min(max_rec, 0xbU); /* <= 4096 */
- if (max_rec < 8) {
- fw_notify("max_rec %x out of range\n", max_rec);
- max_rec = 8;
- }
+ max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */
return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}
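
fwnet_max_payload() above maps a node's max_rec field to the usable fragment size: IEEE 1394 encodes the largest asynchronous payload as 2^(max_rec + 1) bytes, so clamping max_rec to 8...11 bounds the packet at 512...4096 bytes before the RFC 2374 fragment header is subtracted. A rough userspace sketch of that mapping; the macros and the 8-byte header size are assumptions for illustration, the driver defines RFC2374_FRAG_HDR_SIZE itself:

    #include <stdio.h>

    #define RFC2374_FRAG_HDR_SIZE 8   /* assumed fragment-header size */
    #define min(a, b)           ((a) < (b) ? (a) : (b))
    #define clamp(val, lo, hi)  ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

    static unsigned int max_payload_sketch(unsigned int max_rec, unsigned int speed)
    {
        max_rec = min(max_rec, speed + 8);   /* speed 0..3 = S100..S800 */
        max_rec = clamp(max_rec, 8U, 11U);   /* 512 ... 4096 byte packets */
        return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
    }

    int main(void)
    {
        /* an S100 peer vs. an S800 peer advertising an oversized max_rec */
        printf("%u %u\n", max_payload_sketch(8, 0), max_payload_sketch(15, 3));
        return 0;
    }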
@@ -1125,17 +1121,12 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
unsigned u;
if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
- /* outside OHCI posted write area? */
- static const struct fw_address_region region = {
- .start = 0xffff00000000ULL,
- .end = CSR_REGISTER_BASE,
- };
-
dev->handler.length = 4096;
dev->handler.address_callback = fwnet_receive_packet;
dev->handler.callback_data = dev;
- retval = fw_core_add_address_handler(&dev->handler, &region);
+ retval = fw_core_add_address_handler(&dev->handler,
+ &fw_high_memory_region);
if (retval < 0)
goto failed_initial;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index fd7170a9ad2c..6628feaa7622 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -42,6 +42,7 @@
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -125,6 +126,7 @@ struct context {
struct fw_ohci *ohci;
u32 regs;
int total_allocation;
+ u32 current_bus;
bool running;
bool flushing;
@@ -226,7 +228,7 @@ struct fw_ohci {
__le32 *self_id_cpu;
dma_addr_t self_id_bus;
- struct tasklet_struct bus_reset_tasklet;
+ struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
@@ -263,6 +265,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
+#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define QUIRK_CYCLE_TIMER 1
@@ -270,6 +274,7 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_BE_HEADERS 4
#define QUIRK_NO_1394A 8
#define QUIRK_NO_MSI 16
+#define QUIRK_TI_SLLZ059 32
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -299,6 +304,12 @@ static const struct {
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
+ QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
+
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
+ QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
+
{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -315,6 +326,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
+ ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -859,7 +871,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_tasklet).
+ * at a slightly incorrect time (in bus_reset_work).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1046,6 +1058,7 @@ static void context_tasklet(unsigned long data)
address = le32_to_cpu(last->branch_address);
z = address & 0xf;
address &= ~0xf;
+ ctx->current_bus = address;
/* If the branch address points to a buffer outside of the
* current buffer, advance to the next buffer. */
@@ -1713,9 +1726,94 @@ static u32 update_bus_time(struct fw_ohci *ohci)
return ohci->bus_time | cycle_time_seconds;
}
-static void bus_reset_tasklet(unsigned long data)
+static int get_status_for_port(struct fw_ohci *ohci, int port_index)
+{
+ int reg;
+
+ mutex_lock(&ohci->phy_reg_mutex);
+ reg = write_phy_reg(ohci, 7, port_index);
+ if (reg >= 0)
+ reg = read_phy_reg(ohci, 8);
+ mutex_unlock(&ohci->phy_reg_mutex);
+ if (reg < 0)
+ return reg;
+
+ switch (reg & 0x0f) {
+ case 0x06:
+ return 2; /* is child node (connected to parent node) */
+ case 0x0e:
+ return 3; /* is parent node (connected to child node) */
+ }
+ return 1; /* not connected */
+}
+
+static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
+ int self_id_count)
+{
+ int i;
+ u32 entry;
+
+ for (i = 0; i < self_id_count; i++) {
+ entry = ohci->self_id_buffer[i];
+ if ((self_id & 0xff000000) == (entry & 0xff000000))
+ return -1;
+ if ((self_id & 0xff000000) < (entry & 0xff000000))
+ return i;
+ }
+ return i;
+}
+
+/*
+ * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
+ * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
+ * Construct the selfID from phy register contents.
+ * FIXME: How to determine the selfID.i flag?
+ */
+static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
+{
+ int reg, i, pos, status;
+ /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
+ u32 self_id = 0x8040c800;
+
+ reg = reg_read(ohci, OHCI1394_NodeID);
+ if (!(reg & OHCI1394_NodeID_idValid)) {
+ fw_notify("node ID not valid, new bus reset in progress\n");
+ return -EBUSY;
+ }
+ self_id |= ((reg & 0x3f) << 24); /* phy ID */
+
+ reg = ohci_read_phy_reg(&ohci->card, 4);
+ if (reg < 0)
+ return reg;
+ self_id |= ((reg & 0x07) << 8); /* power class */
+
+ reg = ohci_read_phy_reg(&ohci->card, 1);
+ if (reg < 0)
+ return reg;
+ self_id |= ((reg & 0x3f) << 16); /* gap count */
+
+ for (i = 0; i < 3; i++) {
+ status = get_status_for_port(ohci, i);
+ if (status < 0)
+ return status;
+ self_id |= ((status & 0x3) << (6 - (i * 2)));
+ }
+
+ pos = get_self_id_pos(ohci, self_id, self_id_count);
+ if (pos >= 0) {
+ memmove(&(ohci->self_id_buffer[pos+1]),
+ &(ohci->self_id_buffer[pos]),
+ (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
+ ohci->self_id_buffer[pos] = self_id;
+ self_id_count++;
+ }
+ return self_id_count;
+}
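
find_and_insert_self_id() above synthesizes a self-ID packet #0 for the local phy out of individual phy registers: the phy ID goes into bits 29..24, the gap count into bits 21..16, and each port status into a two-bit field starting at bit 6, on top of the 0x8040c800 base (link active, speed code 3, contender). A small decoder sketch for that layout, following the IEEE 1394a self-ID format; illustrative only, not driver code:

    #include <stdio.h>

    static void decode_self_id(unsigned int q)
    {
        printf("phy_id=%u link=%u gap_count=%u speed=%u contender=%u "
               "power_class=%u ports=[%u %u %u]\n",
               (q >> 24) & 0x3f,   /* bits 29..24: phy ID */
               (q >> 22) & 1,      /* L: link active */
               (q >> 16) & 0x3f,   /* gap count */
               (q >> 14) & 3,      /* sp: speed code */
               (q >> 11) & 1,      /* c: contender */
               (q >>  8) & 7,      /* pwr: power class */
               (q >> 6) & 3, (q >> 4) & 3, (q >> 2) & 3);   /* p0, p1, p2 */
    }

    int main(void)
    {
        decode_self_id(0x8040c800 | (1u << 24));   /* base value, phy ID 1 */
        return 0;
    }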
+
+static void bus_reset_work(struct work_struct *work)
{
- struct fw_ohci *ohci = (struct fw_ohci *)data;
+ struct fw_ohci *ohci =
+ container_of(work, struct fw_ohci, bus_reset_work);
int self_id_count, i, j, reg;
int generation, new_generation;
unsigned long flags;
@@ -1753,21 +1851,50 @@ static void bus_reset_tasklet(unsigned long data)
* bit extra to get the actual number of self IDs.
*/
self_id_count = (reg >> 3) & 0xff;
- if (self_id_count == 0 || self_id_count > 252) {
+
+ if (self_id_count > 252) {
fw_notify("inconsistent self IDs\n");
return;
}
+
generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
- fw_notify("inconsistent self IDs\n");
- return;
+ /*
+ * If the invalid data looks like a cycle start packet,
+ * it's likely to be the result of the cycle master
+ * having a wrong gap count. In this case, the self IDs
+ * so far are valid and should be processed so that the
+ * bus manager can then correct the gap count.
+ */
+ if (cond_le32_to_cpu(ohci->self_id_cpu[i])
+ == 0xffff008f) {
+ fw_notify("ignoring spurious self IDs\n");
+ self_id_count = j;
+ break;
+ } else {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
}
ohci->self_id_buffer[j] =
cond_le32_to_cpu(ohci->self_id_cpu[i]);
}
+
+ if (ohci->quirks & QUIRK_TI_SLLZ059) {
+ self_id_count = find_and_insert_self_id(ohci, self_id_count);
+ if (self_id_count < 0) {
+ fw_notify("could not construct local self ID\n");
+ return;
+ }
+ }
+
+ if (self_id_count == 0) {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
rmb();
/*
@@ -1887,7 +2014,7 @@ static irqreturn_t irq_handler(int irq, void *data)
log_irqs(event);
if (event & OHCI1394_selfIDComplete)
- tasklet_schedule(&ohci->bus_reset_tasklet);
+ queue_work(fw_workqueue, &ohci->bus_reset_work);
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
@@ -1934,7 +2061,8 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_read(ohci, OHCI1394_PostedWriteAddressLo);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
- fw_error("PCI posted write error\n");
+ if (printk_ratelimit())
+ fw_error("PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
@@ -2048,6 +2176,28 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci)
return 0;
}
+static int probe_tsb41ba3d(struct fw_ohci *ohci)
+{
+ /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
+ static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
+ int reg, i;
+
+ reg = read_phy_reg(ohci, 2);
+ if (reg < 0)
+ return reg;
+ if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
+ return 0;
+
+ for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
+ reg = read_paged_phy_reg(ohci, 1, i + 10);
+ if (reg < 0)
+ return reg;
+ if (reg != id[i])
+ return 0;
+ }
+ return 1;
+}
+
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
@@ -2085,6 +2235,16 @@ static int ohci_enable(struct fw_card *card,
return -EIO;
}
+ if (ohci->quirks & QUIRK_TI_SLLZ059) {
+ ret = probe_tsb41ba3d(ohci);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ fw_notify("local TSB41BA3D phy\n");
+ else
+ ohci->quirks &= ~QUIRK_TI_SLLZ059;
+ }
+
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);
@@ -2260,7 +2420,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_tasklet).
+ * ohci->next_config_rom to NULL (see bus_reset_work).
*/
next_config_rom =
@@ -2539,6 +2699,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
struct iso_context *ctx =
container_of(context, struct iso_context, context);
struct descriptor *pd;
+ u32 buffer_dma;
__le32 *ir_header;
void *p;
@@ -2549,6 +2710,16 @@ static int handle_ir_packet_per_buffer(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
+ d++;
+ buffer_dma = le32_to_cpu(d->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(d->req_count),
+ DMA_FROM_DEVICE);
+ }
+
p = last + 1;
copy_iso_headers(ctx, p);
@@ -2571,11 +2742,19 @@ static int handle_ir_buffer_fill(struct context *context,
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
+ u32 buffer_dma;
if (!last->transfer_status)
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ buffer_dma = le32_to_cpu(last->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(last->req_count),
+ DMA_FROM_DEVICE);
+
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
ctx->base.callback.mc(&ctx->base,
le32_to_cpu(last->data_address) +
@@ -2586,6 +2765,43 @@ static int handle_ir_buffer_fill(struct context *context,
return 1;
}
+static inline void sync_it_packet_for_cpu(struct context *context,
+ struct descriptor *pd)
+{
+ __le16 control;
+ u32 buffer_dma;
+
+ /* only packets beginning with OUTPUT_MORE* have data buffers */
+ if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+ return;
+
+ /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
+ pd += 2;
+
+ /*
+ * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
+ * data buffer is in the context program's coherent page and must not
+ * be synced.
+ */
+ if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
+ (context->current_bus & PAGE_MASK)) {
+ if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+ return;
+ pd++;
+ }
+
+ do {
+ buffer_dma = le32_to_cpu(pd->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(pd->req_count),
+ DMA_TO_DEVICE);
+ control = pd->control;
+ pd++;
+ } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
+}
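
sync_it_packet_for_cpu() and the receive-side handlers above always hand the DMA API a page-aligned base address plus the offset into that page, which is exactly what the buffer_dma & PAGE_MASK / buffer_dma & ~PAGE_MASK pair computes. A trivial sketch of that split, with PAGE_SIZE fixed at 4 KiB for illustration:

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned int buffer_dma = 0x12345678;

        printf("page base=0x%x offset=0x%x\n",
               buffer_dma & PAGE_MASK, buffer_dma & ~PAGE_MASK);
        /* page base=0x12345000 offset=0x678 */
        return 0;
    }

The direction argument then matches the data flow: receive buffers are synced DMA_FROM_DEVICE before the CPU reads them, transmit buffers DMA_TO_DEVICE before the controller fetches them.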
+
static int handle_it_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -2602,6 +2818,8 @@ static int handle_it_packet(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ sync_it_packet_for_cpu(context, d);
+
i = ctx->header_length;
if (i + 4 < PAGE_SIZE) {
/* Present this value as big-endian to match the receive code */
@@ -2971,6 +3189,10 @@ static int queue_iso_transmit(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
pd[i].data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+ page_bus, offset, length,
+ DMA_TO_DEVICE);
+
payload_index += length;
}
@@ -2995,6 +3217,7 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
+ struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
@@ -3049,6 +3272,10 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
pd->data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(device, page_bus,
+ offset, length,
+ DMA_FROM_DEVICE);
+
offset = (offset + length) & ~PAGE_MASK;
rest -= length;
if (offset == 0)
@@ -3108,6 +3335,10 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
d->data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+ page_bus, offset, length,
+ DMA_FROM_DEVICE);
+
rest -= length;
offset = 0;
page++;
@@ -3239,8 +3470,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- tasklet_init(&ohci->bus_reset_tasklet,
- bus_reset_tasklet, (unsigned long)ohci);
+ INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
err = pci_request_region(dev, 0, ohci_driver_name);
if (err) {
@@ -3382,6 +3612,7 @@ static void pci_remove(struct pci_dev *dev)
ohci = pci_get_drvdata(dev);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
flush_writes(ohci);
+ cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
/*
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 17cef864506a..68375bc3aef6 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -154,12 +154,16 @@ struct sbp2_logical_unit {
bool blocked;
};
+static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
+{
+ queue_delayed_work(fw_workqueue, &lu->work, delay);
+}
+
/*
* We create one struct sbp2_target per IEEE 1212 Unit Directory
* and one struct Scsi_Host per sbp2_target.
*/
struct sbp2_target {
- struct kref kref;
struct fw_unit *unit;
const char *bus_id;
struct list_head lu_list;
@@ -772,71 +776,6 @@ static int sbp2_lun2int(u16 lun)
return scsilun_to_int(&eight_bytes_lun);
}
-static void sbp2_release_target(struct kref *kref)
-{
- struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
- struct sbp2_logical_unit *lu, *next;
- struct Scsi_Host *shost =
- container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- struct scsi_device *sdev;
- struct fw_device *device = target_device(tgt);
-
- /* prevent deadlocks */
- sbp2_unblock(tgt);
-
- list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
- sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
- if (sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- }
- if (lu->login_id != INVALID_LOGIN_ID) {
- int generation, node_id;
- /*
- * tgt->node_id may be obsolete here if we failed
- * during initial login or after a bus reset where
- * the topology changed.
- */
- generation = device->generation;
- smp_rmb(); /* node_id vs. generation */
- node_id = device->node_id;
- sbp2_send_management_orb(lu, node_id, generation,
- SBP2_LOGOUT_REQUEST,
- lu->login_id, NULL);
- }
- fw_core_remove_address_handler(&lu->address_handler);
- list_del(&lu->link);
- kfree(lu);
- }
- scsi_remove_host(shost);
- fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
-
- fw_unit_put(tgt->unit);
- scsi_host_put(shost);
- fw_device_put(device);
-}
-
-static void sbp2_target_get(struct sbp2_target *tgt)
-{
- kref_get(&tgt->kref);
-}
-
-static void sbp2_target_put(struct sbp2_target *tgt)
-{
- kref_put(&tgt->kref, sbp2_release_target);
-}
-
-/*
- * Always get the target's kref when scheduling work on one its units.
- * Each workqueue job is responsible to call sbp2_target_put() upon return.
- */
-static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
-{
- sbp2_target_get(lu->tgt);
- if (!queue_delayed_work(fw_workqueue, &lu->work, delay))
- sbp2_target_put(lu->tgt);
-}
-
/*
* Write retransmit retry values into the BUSY_TIMEOUT register.
* - The single-phase retry protocol is supported by all SBP-2 devices, but the
@@ -877,7 +816,7 @@ static void sbp2_login(struct work_struct *work)
int generation, node_id, local_node_id;
if (fw_device_is_shutdown(device))
- goto out;
+ return;
generation = device->generation;
smp_rmb(); /* node IDs must not be older than generation */
@@ -899,7 +838,7 @@ static void sbp2_login(struct work_struct *work)
/* Let any waiting I/O fail from now on. */
sbp2_unblock(lu->tgt);
}
- goto out;
+ return;
}
tgt->node_id = node_id;
@@ -925,7 +864,8 @@ static void sbp2_login(struct work_struct *work)
if (lu->has_sdev) {
sbp2_cancel_orbs(lu);
sbp2_conditionally_unblock(lu);
- goto out;
+
+ return;
}
if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
@@ -957,7 +897,8 @@ static void sbp2_login(struct work_struct *work)
lu->has_sdev = true;
scsi_device_put(sdev);
sbp2_allow_block(lu);
- goto out;
+
+ return;
out_logout_login:
smp_rmb(); /* generation may have changed */
@@ -971,8 +912,57 @@ static void sbp2_login(struct work_struct *work)
* lu->work already. Reset the work from reconnect to login.
*/
PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- out:
- sbp2_target_put(tgt);
+}
+
+static void sbp2_reconnect(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu =
+ container_of(work, struct sbp2_logical_unit, work.work);
+ struct sbp2_target *tgt = lu->tgt;
+ struct fw_device *device = target_device(tgt);
+ int generation, node_id, local_node_id;
+
+ if (fw_device_is_shutdown(device))
+ return;
+
+ generation = device->generation;
+ smp_rmb(); /* node IDs must not be older than generation */
+ node_id = device->node_id;
+ local_node_id = device->card->node_id;
+
+ if (sbp2_send_management_orb(lu, node_id, generation,
+ SBP2_RECONNECT_REQUEST,
+ lu->login_id, NULL) < 0) {
+ /*
+ * If reconnect was impossible even though we are in the
+ * current generation, fall back and try to log in again.
+ *
+ * We could check for "Function rejected" status, but
+ * looking at the bus generation as simpler and more general.
+ */
+ smp_rmb(); /* get current card generation */
+ if (generation == device->card->generation ||
+ lu->retries++ >= 5) {
+ fw_error("%s: failed to reconnect\n", tgt->bus_id);
+ lu->retries = 0;
+ PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ }
+ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
+ return;
+ }
+
+ tgt->node_id = node_id;
+ tgt->address_high = local_node_id << 16;
+ smp_wmb(); /* node IDs must not be older than generation */
+ lu->generation = generation;
+
+ fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
+ tgt->bus_id, lu->lun, lu->retries);
+
+ sbp2_agent_reset(lu);
+ sbp2_cancel_orbs(lu);
+ sbp2_conditionally_unblock(lu);
}
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -1120,6 +1110,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
}
static struct scsi_host_template scsi_driver_template;
+static int sbp2_remove(struct device *dev);
static int sbp2_probe(struct device *dev)
{
@@ -1141,7 +1132,6 @@ static int sbp2_probe(struct device *dev)
tgt = (struct sbp2_target *)shost->hostdata;
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
- kref_init(&tgt->kref);
INIT_LIST_HEAD(&tgt->lu_list);
tgt->bus_id = dev_name(&unit->device);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
@@ -1154,9 +1144,6 @@ static int sbp2_probe(struct device *dev)
if (scsi_add_host(shost, &unit->device) < 0)
goto fail_shost_put;
- fw_device_get(device);
- fw_unit_get(unit);
-
/* implicit directory ID */
tgt->directory_id = ((unit->directory - device->config_rom) * 4
+ CSR_CONFIG_ROM) & 0xffffff;
@@ -1166,7 +1153,7 @@ static int sbp2_probe(struct device *dev)
if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
&firmware_revision) < 0)
- goto fail_tgt_put;
+ goto fail_remove;
sbp2_clamp_management_orb_timeout(tgt);
sbp2_init_workarounds(tgt, model, firmware_revision);
@@ -1177,16 +1164,17 @@ static int sbp2_probe(struct device *dev)
* specifies the max payload size as 2 ^ (max_payload + 2), so
* if we set this to max_speed + 7, we get the right value.
*/
- tgt->max_payload = min(device->max_speed + 7, 10U);
- tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
+ tgt->max_payload = min3(device->max_speed + 7, 10U,
+ device->card->max_receive - 1);
/* Do the login in a workqueue so we can easily reschedule retries. */
list_for_each_entry(lu, &tgt->lu_list, link)
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
return 0;
- fail_tgt_put:
- sbp2_target_put(tgt);
+ fail_remove:
+ sbp2_remove(dev);
return -ENOMEM;
fail_shost_put:
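
The min3() change above folds two min() calls into one while computing the SBP-2 max_payload field, which encodes the payload limit as 2^(max_payload + 2) bytes and is bounded by the link speed, by 10 (4096 bytes), and by the local controller's max_receive. A quick sketch of that arithmetic; the values and the min3 stand-in are illustrative:

    #include <stdio.h>

    #define min(a, b)      ((a) < (b) ? (a) : (b))
    #define min3(a, b, c)  min(min((a), (b)), (c))

    int main(void)
    {
        unsigned int max_speed = 2;           /* S400 */
        unsigned int card_max_receive = 11;   /* controller dependent */
        unsigned int max_payload = min3(max_speed + 7, 10U, card_max_receive - 1);

        printf("max_payload=%u -> %u bytes per packet\n",
               max_payload, 1u << (max_payload + 2));   /* 9 -> 2048 */
        return 0;
    }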
@@ -1194,71 +1182,6 @@ static int sbp2_probe(struct device *dev)
return -ENOMEM;
}
-static int sbp2_remove(struct device *dev)
-{
- struct fw_unit *unit = fw_unit(dev);
- struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
- struct sbp2_logical_unit *lu;
-
- list_for_each_entry(lu, &tgt->lu_list, link)
- cancel_delayed_work_sync(&lu->work);
-
- sbp2_target_put(tgt);
- return 0;
-}
-
-static void sbp2_reconnect(struct work_struct *work)
-{
- struct sbp2_logical_unit *lu =
- container_of(work, struct sbp2_logical_unit, work.work);
- struct sbp2_target *tgt = lu->tgt;
- struct fw_device *device = target_device(tgt);
- int generation, node_id, local_node_id;
-
- if (fw_device_is_shutdown(device))
- goto out;
-
- generation = device->generation;
- smp_rmb(); /* node IDs must not be older than generation */
- node_id = device->node_id;
- local_node_id = device->card->node_id;
-
- if (sbp2_send_management_orb(lu, node_id, generation,
- SBP2_RECONNECT_REQUEST,
- lu->login_id, NULL) < 0) {
- /*
- * If reconnect was impossible even though we are in the
- * current generation, fall back and try to log in again.
- *
- * We could check for "Function rejected" status, but
- * looking at the bus generation as simpler and more general.
- */
- smp_rmb(); /* get current card generation */
- if (generation == device->card->generation ||
- lu->retries++ >= 5) {
- fw_error("%s: failed to reconnect\n", tgt->bus_id);
- lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- }
- sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
- goto out;
- }
-
- tgt->node_id = node_id;
- tgt->address_high = local_node_id << 16;
- smp_wmb(); /* node IDs must not be older than generation */
- lu->generation = generation;
-
- fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
- tgt->bus_id, lu->lun, lu->retries);
-
- sbp2_agent_reset(lu);
- sbp2_cancel_orbs(lu);
- sbp2_conditionally_unblock(lu);
- out:
- sbp2_target_put(tgt);
-}
-
static void sbp2_update(struct fw_unit *unit)
{
struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
@@ -1277,6 +1200,51 @@ static void sbp2_update(struct fw_unit *unit)
}
}
+static int sbp2_remove(struct device *dev)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct fw_device *device = fw_parent_device(unit);
+ struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
+ struct sbp2_logical_unit *lu, *next;
+ struct Scsi_Host *shost =
+ container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ struct scsi_device *sdev;
+
+ /* prevent deadlocks */
+ sbp2_unblock(tgt);
+
+ list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+ cancel_delayed_work_sync(&lu->work);
+ sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ if (lu->login_id != INVALID_LOGIN_ID) {
+ int generation, node_id;
+ /*
+ * tgt->node_id may be obsolete here if we failed
+ * during initial login or after a bus reset where
+ * the topology changed.
+ */
+ generation = device->generation;
+ smp_rmb(); /* node_id vs. generation */
+ node_id = device->node_id;
+ sbp2_send_management_orb(lu, node_id, generation,
+ SBP2_LOGOUT_REQUEST,
+ lu->login_id, NULL);
+ }
+ fw_core_remove_address_handler(&lu->address_handler);
+ list_del(&lu->link);
+ kfree(lu);
+ }
+ scsi_remove_host(shost);
+ fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
+
+ scsi_host_put(shost);
+ return 0;
+}
+
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483